- Important iSCSI stability fixes for connection close.

- Latency measurement improvements
- Minor improvements and logging changes


git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@246 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2008-01-18 19:13:04 +00:00
parent 7ab4d3e638
commit 1964293ff1
8 changed files with 330 additions and 170 deletions

View File

@@ -398,28 +398,28 @@ int conn_del(struct iscsi_session *session, struct conn_info *info)
void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn)
{
if (unlikely(current != conn->rd_task)) {
while(in_softirq())
local_bh_enable();
printk(KERN_EMERG "conn %p rd_task != current %p (pid %d)\n", conn,
current, current->pid);
while(in_softirq())
local_bh_enable();
printk(KERN_EMERG "rd_state %x\n", conn->rd_state);
printk(KERN_EMERG "rd_task %p\n", conn->rd_task);
printk(KERN_EMERG "rd_task->pid %d\n", conn->rd_task->pid);
sBUG();
BUG();
}
}
void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn)
{
if (unlikely(current != conn->wr_task)) {
while(in_softirq())
local_bh_enable();
printk(KERN_EMERG "conn %p wr_task != current %p (pid %d)\n", conn,
current, current->pid);
while(in_softirq())
local_bh_enable();
printk(KERN_EMERG "wr_state %x\n", conn->wr_state);
printk(KERN_EMERG "wr_task %p\n", conn->wr_task);
printk(KERN_EMERG "wr_task->pid %d\n", conn->wr_task->pid);
sBUG();
BUG();
}
}

View File

@@ -97,6 +97,32 @@ static inline void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
SCST_CONTEXT_THREAD);
}
static inline void iscsi_restart_waiting_cmnd(struct iscsi_cmnd *cmnd)
{
/*
* There is no race with conn_abort(), since all functions
* called from single read thread
*/
iscsi_extracheck_is_rd_thread(cmnd->conn);
cmnd->data_waiting = 0;
iscsi_restart_cmnd(cmnd);
}
static inline void iscsi_fail_waiting_cmnd(struct iscsi_cmnd *cmnd)
{
TRACE_DBG("Failing data waiting cmd %p", cmnd);
/*
* There is no race with conn_abort(), since all functions
* called from single read thread
*/
iscsi_extracheck_is_rd_thread(cmnd->conn);
cmnd->data_waiting = 0;
req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
}
struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn, struct iscsi_cmnd *parent)
{
struct iscsi_cmnd *cmnd;
@@ -175,9 +201,12 @@ void cmnd_done(struct iscsi_cmnd *cmnd)
cmnd->parent_req);
}
EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
if (cmnd->parent_req == NULL) {
struct iscsi_conn *conn = cmnd->conn;
TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
spin_lock_bh(&conn->cmd_list_lock);
list_del(&cmnd->cmd_list_entry);
spin_unlock_bh(&conn->cmd_list_lock);
@@ -192,14 +221,14 @@ void cmnd_done(struct iscsi_cmnd *cmnd)
if (cmnd->scst_cmd) {
switch(cmnd->scst_state) {
case ISCSI_CMD_STATE_AFTER_PREPROC:
TRACE_DBG("%s", "AFTER_PREPROC");
TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
scst_restart_cmd(cmnd->scst_cmd,
SCST_PREPROCESS_STATUS_ERROR_FATAL,
SCST_CONTEXT_THREAD);
SCST_CONTEXT_DIRECT);
break;
case ISCSI_CMD_STATE_PROCESSED:
TRACE_DBG("%s", "PROCESSED");
TRACE_DBG("cmd %p PROCESSED", cmnd);
scst_tgt_cmd_done(cmnd->scst_cmd);
break;
default:
@@ -211,10 +240,10 @@ void cmnd_done(struct iscsi_cmnd *cmnd)
}
} else {
EXTRACHECKS_BUG_ON(cmnd->scst_cmd != NULL);
spin_lock_bh(&cmnd->parent_req->rsp_cmd_lock);
TRACE_DBG("Deleting rsp %p from parent %p", cmnd,
cmnd->parent_req);
spin_lock_bh(&cmnd->parent_req->rsp_cmd_lock);
list_del(&cmnd->rsp_cmd_list_entry);
spin_unlock_bh(&cmnd->parent_req->rsp_cmd_lock);
@@ -285,21 +314,32 @@ void req_cmnd_release_force(struct iscsi_cmnd *req, int flags)
again_rsp:
spin_lock_bh(&req->rsp_cmd_lock);
list_for_each_entry_reverse(rsp, &req->rsp_cmd_list, rsp_cmd_list_entry) {
/*
* It's OK to check not under write_list_lock, since
* iscsi_cmnds_init_write() can't be called in parallel with us
* and once on_write_list or write_processing_started get set,
* rsp gets out of this function responsibility.
*/
if (rsp->on_write_list || rsp->write_processing_started ||
rsp->force_cleanup_done)
bool r;
if (rsp->force_cleanup_done)
continue;
rsp->force_cleanup_done = 1;
if (cmnd_get_check(rsp))
continue;
spin_unlock_bh(&req->rsp_cmd_lock);
rsp->force_cleanup_done = 1;
spin_lock(&conn->write_list_lock);
r = rsp->on_write_list || rsp->write_processing_started;
spin_unlock(&conn->write_list_lock);
cmnd_put(rsp);
if (r)
continue;
/*
* If both on_write_list and write_processing_started not set,
* we can safely put() cmnd
*/
cmnd_put(rsp);
goto again_rsp;
}
spin_unlock_bh(&req->rsp_cmd_lock);
@@ -336,9 +376,7 @@ void req_cmnd_release(struct iscsi_cmnd *req)
list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
rx_ddigest_cmd_list_entry) {
TRACE_DBG("Deleting RX ddigest cmd %p from digest "
"list of req %p", c, req);
list_del(&c->rx_ddigest_cmd_list_entry);
cmd_del_from_rx_ddigest_list(c);
cmnd_put(c);
}
@@ -390,7 +428,9 @@ void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
*/
static struct iscsi_cmnd *iscsi_cmnd_create_rsp_cmnd(struct iscsi_cmnd *parent)
{
struct iscsi_cmnd *rsp = cmnd_alloc(parent->conn, parent);
struct iscsi_cmnd *rsp;
rsp = cmnd_alloc(parent->conn, parent);
spin_lock_bh(&parent->rsp_cmd_lock);
TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
@@ -428,6 +468,7 @@ static void iscsi_cmnds_init_write(struct list_head *send, int flags)
* erroneously rejected as a duplicate.
*/
if ((flags & ISCSI_INIT_WRITE_REMOVE_HASH) && rsp->parent_req->hashed &&
(rsp->parent_req->r2t_length == 0) &&
(rsp->parent_req->outstanding_r2t == 0))
cmnd_remove_hash(rsp->parent_req);
@@ -744,8 +785,10 @@ static struct iscsi_cmnd *cmnd_find_hash_get(struct iscsi_session *session,
spin_lock(&session->cmnd_hash_lock);
cmnd = __cmnd_find_hash(session, itt, ttt);
if (cmnd)
cmnd_get(cmnd);
if (cmnd != NULL) {
if (unlikely(cmnd_get_check(cmnd)))
cmnd = NULL;
}
spin_unlock(&session->cmnd_hash_lock);
return cmnd;
@@ -966,12 +1009,12 @@ static void send_r2t(struct iscsi_cmnd *req)
"(r2t_length %d, outstanding_r2t %d)", req,
req->scst_cmd, req->r2t_length, req->outstanding_r2t);
if (req->outstanding_r2t == 0)
iscsi_session_push_cmnd(req);
iscsi_fail_waiting_cmnd(req);
goto out;
}
/*
* There is no race with data_out_start() and __cmnd_abort(), since
* There is no race with data_out_start() and conn_abort(), since
* all functions called from single read thread
*/
iscsi_extracheck_is_rd_thread(req->conn);
@@ -1041,8 +1084,7 @@ static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
*/
goto out;
}
TRACE_DBG("Deleting RX digest cmd %p from digest list", c);
list_del(&c->rx_ddigest_cmd_list_entry);
cmd_del_from_rx_ddigest_list(c);
cmnd_put(c);
}
@@ -1316,7 +1358,7 @@ static int data_out_start(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
TRACE_ENTRY();
/*
* There is no race with send_r2t() and __cmnd_abort(), since
* There is no race with send_r2t() and conn_abort(), since
* all functions called from single read thread
*/
iscsi_extracheck_is_rd_thread(cmnd->conn);
@@ -1382,10 +1424,7 @@ static void data_out_end(struct iscsi_cmnd *cmnd)
iscsi_extracheck_is_rd_thread(cmnd->conn);
if (!(cmnd->conn->ddigest_type & DIGEST_NONE)) {
TRACE_DBG("Adding RX ddigest cmd %p to digest list "
"of req %p", cmnd, req);
list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
&req->rx_ddigest_cmd_list);
cmd_add_on_rx_ddigest_list(req, cmnd);
cmnd_get(cmnd);
}
@@ -1393,9 +1432,11 @@ static void data_out_end(struct iscsi_cmnd *cmnd)
TRACE_DBG("ISCSI_RESERVED_TAG, FINAL %x",
req_hdr->flags & ISCSI_FLG_FINAL);
if (req_hdr->flags & ISCSI_FLG_FINAL)
if (req_hdr->flags & ISCSI_FLG_FINAL) {
req->is_unsolicited_data = 0;
else
if (req->pending)
goto out_put;
} else
goto out_put;
} else {
TRACE_DBG("FINAL %x, outstanding_r2t %d, r2t_length %d",
@@ -1419,60 +1460,34 @@ static void data_out_end(struct iscsi_cmnd *cmnd)
if (!req->is_unsolicited_data)
send_r2t(req);
} else
iscsi_session_push_cmnd(req);
iscsi_restart_waiting_cmnd(req);
out_put:
cmnd_put(cmnd);
return;
}
/*
* Must be called from the read thread. Also called under cmd_list_lock,
* but may drop it inside.
*
* Returns >0 if cmd_list_lock was dropped inside, 0 otherwise.
*/
static int __cmnd_abort(struct iscsi_cmnd *cmnd)
static void __cmnd_abort(struct iscsi_cmnd *cmnd)
{
int res = 0;
struct iscsi_conn *conn = cmnd->conn;
TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
"ref_cnt %d, itt %x, sn %u, op %x, r2t_len %x, CDB op %x, "
"size to write %u, is_unsolicited_data %d, "
"outstanding_r2t %d, data_waiting %d, sess->exp_cmd_sn %u, "
"conn %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
"conn %p, rd_task %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
atomic_read(&cmnd->ref_cnt), cmnd_itt(cmnd), cmnd->pdu.bhs.sn,
cmnd_opcode(cmnd), cmnd->r2t_length, cmnd_scsicode(cmnd),
cmnd_write_size(cmnd), cmnd->is_unsolicited_data,
cmnd->outstanding_r2t, cmnd->data_waiting,
conn->session->exp_cmd_sn, conn);
cmnd->conn->session->exp_cmd_sn, cmnd->conn,
cmnd->conn->rd_task);
#ifdef NET_PAGE_CALLBACKS_DEFINED
TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
#endif
iscsi_extracheck_is_rd_thread(conn);
cmnd->tmfabort = 1;
if (cmnd->data_waiting && conn->closing) {
spin_unlock_bh(&conn->cmd_list_lock);
res = 1;
/* ToDo: this is racy for MC/S */
TRACE_MGMT_DBG("Pushing data waiting cmd %p", cmnd);
iscsi_session_push_cmnd(cmnd);
/*
* We are in the read thread, so we may not worry that after
* cmnd release conn gets released as well.
*/
spin_lock_bh(&conn->cmd_list_lock);
}
return res;
return;
}
/* Must be called from the read thread */
@@ -1566,17 +1581,13 @@ static int target_abort(struct iscsi_cmnd *req, int all)
list_for_each_entry(session, &target->session_list, session_list_entry) {
list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
spin_lock_bh(&conn->cmd_list_lock);
again:
list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
int again = 0;
if (cmnd == req)
continue;
if (all)
again = __cmnd_abort(cmnd);
__cmnd_abort(cmnd);
else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
again = __cmnd_abort(cmnd);
if (again)
goto again;
__cmnd_abort(cmnd);
}
spin_unlock_bh(&conn->cmd_list_lock);
}
@@ -1600,7 +1611,6 @@ static void task_set_abort(struct iscsi_cmnd *req)
list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
spin_lock_bh(&conn->cmd_list_lock);
again:
list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
if (cmnd == req)
@@ -1610,8 +1620,7 @@ again:
if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
req_hdr->cmd_sn == hdr->cmd_sn)
continue;
if (__cmnd_abort(cmnd))
goto again;
__cmnd_abort(cmnd);
}
spin_unlock_bh(&conn->cmd_list_lock);
}
@@ -1627,13 +1636,36 @@ void conn_abort(struct iscsi_conn *conn)
TRACE_MGMT_DBG("Aborting conn %p", conn);
iscsi_extracheck_is_rd_thread(conn);
spin_lock_bh(&conn->cmd_list_lock);
again:
list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
if (__cmnd_abort(cmnd))
goto again;
__cmnd_abort(cmnd);
if (cmnd->data_waiting) {
if (!cmnd_get_check(cmnd)) {
spin_unlock_bh(&conn->cmd_list_lock);
/* ToDo: this is racy for MC/S */
TRACE_MGMT_DBG("Restarting data waiting cmd %p",
cmnd);
iscsi_fail_waiting_cmnd(cmnd);
cmnd_put(cmnd);
/*
* We are in the read thread, so we may not
* worry that after cmnd release conn gets
* released as well.
*/
spin_lock_bh(&conn->cmd_list_lock);
goto again;
}
}
}
spin_unlock_bh(&conn->cmd_list_lock);
return;
}
static void execute_task_management(struct iscsi_cmnd *req)
@@ -1646,8 +1678,12 @@ static void execute_task_management(struct iscsi_cmnd *req)
struct scst_rx_mgmt_params params;
TRACE((function == ISCSI_FUNCTION_ABORT_TASK) ? TRACE_MGMT_MINOR : TRACE_MGMT,
"TM cmd: req %p, itt %x, fn %d, rtt %x, sn %u", req, cmnd_itt(req),
function, req_hdr->rtt, req_hdr->cmd_sn);
"TM fn %d", function);
TRACE_MGMT_DBG("TM req %p, itt %x, rtt %x, sn %u, con %p", req,
cmnd_itt(req), req_hdr->rtt, req_hdr->cmd_sn, conn);
iscsi_extracheck_is_rd_thread(conn);
spin_lock(&sess->sn_lock);
sess->tm_active = 1;
@@ -1828,13 +1864,21 @@ static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
goto out;
}
iscsi_extracheck_is_rd_thread(cmnd->conn);
switch (cmnd_opcode(cmnd)) {
case ISCSI_OP_SCSI_CMD:
if (cmnd->r2t_length != 0) {
if (!cmnd->is_unsolicited_data) {
send_r2t(cmnd);
break;
}
} else
iscsi_restart_cmnd(cmnd);
break;
case ISCSI_OP_NOOP_OUT:
noop_out_exec(cmnd);
break;
case ISCSI_OP_SCSI_CMD:
iscsi_restart_cmnd(cmnd);
break;
case ISCSI_OP_SCSI_TASK_MGT_MSG:
execute_task_management(cmnd);
break;
@@ -2009,12 +2053,6 @@ static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
sBUG_ON(cmnd->parent_req != NULL);
/*
* There is no race with __cmnd_abort(), since all functions
* called from single read thread
*/
cmnd->data_waiting = 0;
if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
cmnd->pdu.bhs.sn);
@@ -2056,6 +2094,7 @@ static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
break;
list_del(&cmnd->pending_list_entry);
cmnd->pending = 0;
TRACE_DBG("Processing pending cmd %p (cmd_sn %u)",
cmnd, cmd_sn);
@@ -2129,6 +2168,7 @@ static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
}
list_add_tail(&cmnd->pending_list_entry, entry);
cmnd->pending = 1;
}
out:
return;
@@ -2217,12 +2257,6 @@ void cmnd_rx_end(struct iscsi_cmnd *cmnd)
switch (cmnd_opcode(cmnd)) {
case ISCSI_OP_SCSI_CMD:
if (cmnd->r2t_length != 0) {
if (!cmnd->is_unsolicited_data)
send_r2t(cmnd);
break;
}
/* else go through */
case ISCSI_OP_SCSI_REJECT:
case ISCSI_OP_NOOP_OUT:
case ISCSI_OP_SCSI_TASK_MGT_MSG:
@@ -2530,12 +2564,14 @@ static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
(struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
struct iscsi_task_rsp_hdr *rsp_hdr;
struct iscsi_session *sess = req->conn->session;
int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
TRACE_ENTRY();
TRACE_MGMT_DBG("TM req %p finished", req);
TRACE((req_hdr->function == ISCSI_FUNCTION_ABORT_TASK) ?
TRACE_MGMT_MINOR : TRACE_MGMT,
"TM req %p finished, status %d", req, status);
"TM fn %d finished, status %d", fn, status);
rsp = iscsi_cmnd_create_rsp_cmnd(req);
rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
@@ -2545,8 +2581,7 @@ static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
rsp_hdr->itt = req_hdr->itt;
rsp_hdr->response = status;
if ((req_hdr->function & ISCSI_FUNCTION_MASK) ==
ISCSI_FUNCTION_TARGET_COLD_RESET)
if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET)
rsp->should_close_conn = 1;
sBUG_ON(sess->tm_rsp != NULL);

View File

@@ -138,12 +138,6 @@ struct iscsi_conn {
spinlock_t cmd_list_lock; /* BH lock */
/*
* IMPORTANT! If you find a cmd in cmd_list and immediately get_cmnd()
* it, it still can be destroyed immediately after you drop
* cmd_list_lock no matter how big is its ref_cnt!
*/
/* Protected by cmd_list_lock */
struct list_head cmd_list; /* in/outcoming pdus */
@@ -236,6 +230,7 @@ struct iscsi_cmnd {
/* Some flags protected by conn->write_list_lock */
unsigned int hashed:1;
unsigned int should_close_conn:1;
unsigned int pending:1;
unsigned int own_sg:1;
unsigned int on_write_list:1;
unsigned int write_processing_started:1;
@@ -243,6 +238,7 @@ struct iscsi_cmnd {
unsigned int force_cleanup_done:1;
unsigned int dec_active_cmnds:1;
#ifdef EXTRACHECKS
unsigned int on_rx_digest_list:1;
unsigned int release_called:1;
#endif
@@ -254,12 +250,6 @@ struct iscsi_cmnd {
/* Unions are for readability and grepability */
/*
* IMPORTANT! If you find a cmd in rsp_cmd_list and immediately
* get_cmnd() it, it still can be destroyed immediately after you drop
* rsp_cmd_lock no matter how big is its ref_cnt!
*/
union {
/* Protected by rsp_cmd_lock */
struct list_head rsp_cmd_list;
@@ -413,6 +403,27 @@ static inline void iscsi_cmnd_set_length(struct iscsi_pdu *pdu)
extern struct scst_tgt_template iscsi_template;
/*
* Skip this command if result is not 0. Must be called under
* corresponding lock.
*/
static inline bool cmnd_get_check(struct iscsi_cmnd *cmnd)
{
int r = atomic_inc_return(&cmnd->ref_cnt);
int res;
if (unlikely(r == 1)) {
TRACE_DBG("cmnd %p is being destroyed", cmnd);
atomic_dec(&cmnd->ref_cnt);
res = 1;
/* Necessary code is serialized by locks in cmnd_done() */
} else {
TRACE_DBG("cmnd %p, new ref_cnt %d", cmnd,
atomic_read(&cmnd->ref_cnt));
res = 0;
}
return res;
}
static inline void cmnd_get(struct iscsi_cmnd *cmnd)
{
atomic_inc(&cmnd->ref_cnt);
@@ -428,7 +439,7 @@ static inline void cmnd_get_ordered(struct iscsi_cmnd *cmnd)
static inline void cmnd_put(struct iscsi_cmnd *cmnd)
{
TRACE_DBG("cmnd %p, new cmnd->ref_cnt %d", cmnd,
TRACE_DBG("cmnd %p, new ref_cnt %d", cmnd,
atomic_read(&cmnd->ref_cnt)-1);
sBUG_ON(atomic_read(&cmnd->ref_cnt) == 0);
if (atomic_dec_and_test(&cmnd->ref_cnt))
@@ -452,6 +463,27 @@ static inline void cmd_del_from_write_list(struct iscsi_cmnd *cmnd)
cmnd->on_write_list = 0;
}
static inline void cmd_add_on_rx_ddigest_list(struct iscsi_cmnd *req,
struct iscsi_cmnd *cmnd)
{
TRACE_DBG("Adding RX ddigest cmd %p to digest list "
"of req %p", cmnd, req);
list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
&req->rx_ddigest_cmd_list);
#ifdef EXTRACHECKS
cmnd->on_rx_digest_list = 1;
#endif
}
static inline void cmd_del_from_rx_ddigest_list(struct iscsi_cmnd *cmnd)
{
TRACE_DBG("Deleting RX digest cmd %p from digest list", cmnd);
list_del(&cmnd->rx_ddigest_cmd_list_entry);
#ifdef EXTRACHECKS
cmnd->on_rx_digest_list = 0;
#endif
}
static inline int test_write_ready(struct iscsi_conn *conn)
{
/*
@@ -482,7 +514,7 @@ static inline void conn_put(struct iscsi_conn *conn)
/*
* It always ordered to protect from undesired side effects like
* accessing just destroyed obeject because of this *_dec() reordering.
* accessing just destroyed object because of this *_dec() reordering.
*/
smp_mb__before_atomic_dec();
atomic_dec(&conn->conn_ref_cnt);

View File

@@ -76,21 +76,30 @@ static void iscsi_check_closewait(struct iscsi_conn *conn)
again:
spin_lock_bh(&conn->cmd_list_lock);
list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
struct iscsi_cmnd *rsp;
int restart = 0;
TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, data_waiting %d, "
"ref_cnt %d, parent_req %p, net_ref_cnt %d, sg %p",
cmnd, cmnd->scst_state, cmnd->data_waiting,
atomic_read(&cmnd->ref_cnt), cmnd->parent_req,
atomic_read(&cmnd->net_ref_cnt), cmnd->sg);
sBUG_ON(cmnd->parent_req != NULL);
if (cmnd->sg != NULL) {
int sg_cnt, i, restart = 0;
int sg_cnt, i;
sg_cnt = get_pgcnt(cmnd->bufflen,
cmnd->sg[0].offset);
cmnd_get(cmnd);
if (cmnd_get_check(cmnd))
continue;
for(i = 0; i < sg_cnt; i++) {
TRACE_CONN_CLOSE_DBG("page %p, net_priv %p, _count %d",
cmnd->sg[i].page, cmnd->sg[i].page->net_priv,
atomic_read(&cmnd->sg[i].page->_count));
if (cmnd->sg[i].page->net_priv != NULL) {
if (restart == 0) {
spin_unlock_bh(&conn->cmd_list_lock);
@@ -101,9 +110,49 @@ again:
}
}
cmnd_put(cmnd);
if (restart)
goto again;
}
spin_lock_bh(&cmnd->rsp_cmd_lock);
list_for_each_entry(rsp, &cmnd->rsp_cmd_list, rsp_cmd_list_entry) {
TRACE_CONN_CLOSE_DBG(" rsp %p, ref_cnt %d, net_ref_cnt %d, "
"sg %p", rsp, atomic_read(&rsp->ref_cnt),
atomic_read(&rsp->net_ref_cnt), rsp->sg);
if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
int sg_cnt, i;
sg_cnt = get_pgcnt(rsp->bufflen,
rsp->sg[0].offset);
sBUG_ON(rsp->sg_cnt != sg_cnt);
if (cmnd_get_check(rsp))
continue;
for(i = 0; i < sg_cnt; i++) {
TRACE_CONN_CLOSE_DBG(" page %p, net_priv %p, "
"_count %d", rsp->sg[i].page,
rsp->sg[i].page->net_priv,
atomic_read(&rsp->sg[i].page->_count));
if (rsp->sg[i].page->net_priv != NULL) {
if (restart == 0) {
spin_unlock_bh(&cmnd->rsp_cmd_lock);
spin_unlock_bh(&conn->cmd_list_lock);
restart = 1;
}
while(rsp->sg[i].page->net_priv != NULL)
iscsi_put_page_callback(rsp->sg[i].page);
}
}
cmnd_put(rsp);
if (restart)
goto again;
}
}
spin_unlock_bh(&cmnd->rsp_cmd_lock);
}
spin_unlock_bh(&conn->cmd_list_lock);
@@ -194,6 +243,7 @@ static void close_conn(struct iscsi_conn *conn)
cmnd);
list_del(&cmnd->pending_list_entry);
cmnd->pending = 0;
session->exp_cmd_sn++;
@@ -225,6 +275,7 @@ static void close_conn(struct iscsi_conn *conn)
"pending cmd %p", cmnd);
list_del(&cmnd->pending_list_entry);
cmnd->pending = 0;
if (session->exp_cmd_sn == cmnd->pdu.bhs.sn)
session->exp_cmd_sn++;
@@ -254,17 +305,20 @@ static void close_conn(struct iscsi_conn *conn)
#ifdef NET_PAGE_CALLBACKS_DEFINED
struct iscsi_cmnd *rsp;
#endif
#if 0
if (time_after(jiffies, start_waiting+10*HZ))
trace_flag |= TRACE_CONN_OC_DBG;
#endif
spin_lock_bh(&conn->cmd_list_lock);
list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, scst_cmd "
"state %d, data_waiting %d, ref_cnt %d, sn %u, "
"parent_req %p", cmnd, cmnd->scst_state,
"parent_req %p, pending %d", cmnd, cmnd->scst_state,
(cmnd->scst_cmd != NULL) ? cmnd->scst_cmd->state : -1,
cmnd->data_waiting, atomic_read(&cmnd->ref_cnt),
cmnd->pdu.bhs.sn, cmnd->parent_req);
cmnd->pdu.bhs.sn, cmnd->parent_req, cmnd->pending);
#ifdef NET_PAGE_CALLBACKS_DEFINED
TRACE_CONN_CLOSE_DBG("net_ref_cnt %d, sg %p",
atomic_read(&cmnd->net_ref_cnt), cmnd->sg);
@@ -333,6 +387,39 @@ static void close_conn(struct iscsi_conn *conn)
return;
}
static int close_conn_thr(void *arg)
{
struct iscsi_conn *conn = (struct iscsi_conn *)arg;
TRACE_ENTRY();
#ifdef EXTRACHECKS
conn->rd_task = current;
#endif
close_conn(conn);
TRACE_EXIT();
return 0;
}
/* No locks */
static void start_close_conn(struct iscsi_conn *conn)
{
struct task_struct *t;
TRACE_ENTRY();
t = kthread_run(close_conn_thr, conn, "iscsi_conn_cleanup");
if (IS_ERR(t)) {
PRINT_ERROR("kthread_run() failed (%ld), closing conn %p "
"directly", PTR_ERR(t), conn);
close_conn(conn);
}
TRACE_EXIT();
return;
}
static inline void iscsi_conn_init_read(struct iscsi_conn *conn, void *data, size_t len)
{
len = (len + 3) & -4; // XXX ???
@@ -525,12 +612,8 @@ static int recv(struct iscsi_conn *conn)
case RX_CHECK_DDIGEST:
conn->read_state = RX_END;
if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
TRACE_DBG("Adding RX ddigest cmd %p to digest list "
"of self", cmnd);
list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
&cmnd->rx_ddigest_cmd_list);
cmd_add_on_rx_ddigest_list(cmnd, cmnd);
cmnd_get(cmnd);
conn->read_state = RX_END;
} else if (cmnd_opcode(cmnd) != ISCSI_OP_SCSI_DATA_OUT) {
/*
* We could get here only for NOP-Out. ISCSI RFC doesn't
@@ -584,7 +667,7 @@ static int process_read_io(struct iscsi_conn *conn, int *closed)
do {
res = recv(conn);
if (unlikely(conn->closing)) {
close_conn(conn);
start_close_conn(conn);
*closed = 1;
break;
}

View File

@@ -949,8 +949,8 @@ struct scst_session
#ifdef MEASURE_LATENCY
spinlock_t meas_lock;
uint64_t processing_time;
int processed_cmds;
uint64_t scst_time, processing_time;
unsigned int processed_cmds;
#endif
};

View File

@@ -732,17 +732,6 @@ static int vdisk_do_job(struct scst_cmd *cmd)
lba_start |= ((u64)cdb[5]);
data_len = cmd->bufflen;
break;
case SYNCHRONIZE_CACHE:
lba_start |= ((u64)cdb[2]) << 24;
lba_start |= ((u64)cdb[3]) << 16;
lba_start |= ((u64)cdb[4]) << 8;
lba_start |= ((u64)cdb[5]);
data_len = ((cdb[7] << (BYTE * 1)) + (cdb[8] << (BYTE * 0)))
<< virt_dev->block_shift;
if (data_len == 0)
data_len = virt_dev->file_size -
((loff_t)lba_start << virt_dev->block_shift);
break;
case READ_16:
case WRITE_16:
case WRITE_VERIFY_16:
@@ -757,6 +746,17 @@ static int vdisk_do_job(struct scst_cmd *cmd)
lba_start |= ((u64)cdb[9]);
data_len = cmd->bufflen;
break;
case SYNCHRONIZE_CACHE:
lba_start |= ((u64)cdb[2]) << 24;
lba_start |= ((u64)cdb[3]) << 16;
lba_start |= ((u64)cdb[4]) << 8;
lba_start |= ((u64)cdb[5]);
data_len = ((cdb[7] << (BYTE * 1)) + (cdb[8] << (BYTE * 0)))
<< virt_dev->block_shift;
if (data_len == 0)
data_len = virt_dev->file_size -
((loff_t)lba_start << virt_dev->block_shift);
break;
}
loff = (loff_t)lba_start << virt_dev->block_shift;

View File

@@ -395,43 +395,45 @@ static int lat_info_show(struct seq_file *seq, void *v)
goto out;
}
seq_printf(seq, "%-20s %-45s %-35s\n", "Target name", "Initiator name",
"Processing latency (us)");
seq_printf(seq, "%-20s %-45s %-15s %15s\n", "Target name", "Initiator name",
"SCST latency", "Processing latency (us)");
list_for_each_entry(acg, &scst_acg_list, scst_acg_list_entry) {
list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
unsigned long lat = 0;
uint64_t processing_time;
uint64_t processed_cmds;
unsigned long proc_lat = 0, scst_lat = 0;
uint64_t proc_time, scst_time;
unsigned int processed_cmds;
spin_lock_bh(&sess->meas_lock);
processing_time = sess->processing_time;
proc_time = sess->processing_time;
scst_time = sess->scst_time;
processed_cmds = sess->processed_cmds;
spin_unlock_bh(&sess->meas_lock);
TRACE_DBG("sess %p, processing_time %Ld, "
"processed_cmds %Ld", sess, processing_time,
processed_cmds);
TRACE_DBG("sess %p, scst_time %Ld, proc_time %Ld, "
"processed_cmds %d", sess, scst_time,
proc_time, processed_cmds);
#if BITS_PER_LONG == 32
while((processing_time & 0xFFFFFFFF00000000LL) != 0) {
TRACE_DBG("Processing time too big ("
"processing_time %Ld, processed_cmds %Ld",
processing_time, processed_cmds);
processing_time >>= 1;
/* Unfortunately, do_div() doesn't work too well */
while(((proc_time & 0xFFFFFFFF00000000LL) != 0) ||
((scst_time & 0xFFFFFFFF00000000LL) != 0)) {
TRACE_DBG("%s", "Gathered time too big");
proc_time >>= 1;
scst_time >>= 1;
processed_cmds >>= 1;
}
#endif
if (sess->processed_cmds != 0) {
lat = (unsigned long)processing_time /
(unsigned long)processed_cmds;
proc_lat = (unsigned long)proc_time / processed_cmds;
scst_lat = (unsigned long)scst_time / processed_cmds;
}
seq_printf(seq, "%-20s %-45s %-15ld\n",
seq_printf(seq, "%-20s %-45s %-15ld %-15ld\n",
sess->tgt->tgtt->name,
sess->initiator_name,
lat);
scst_lat, proc_lat);
}
}
@@ -1837,12 +1839,12 @@ static int scst_sessions_info_show(struct seq_file *seq, void *v)
goto out;
}
seq_printf(seq, "%-20s %-35s %-35s %-15s\n", "Target name", "Initiator name",
seq_printf(seq, "%-20s %-45s %-35s %-15s\n", "Target name", "Initiator name",
"Group name", "Command Count");
list_for_each_entry(acg, &scst_acg_list, scst_acg_list_entry) {
list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
seq_printf(seq, "%-20s %-35s %-35s %-15d\n",
seq_printf(seq, "%-20s %-45s %-35s %-15d\n",
sess->tgt->tgtt->name,
sess->initiator_name,
acg->acg_name,

View File

@@ -2475,21 +2475,27 @@ out:
#ifdef MEASURE_LATENCY
{
struct timespec ts;
uint64_t finish, processing_time;
uint64_t finish, scst_time, proc_time;
struct scst_session *sess = cmd->sess;
getnstimeofday(&ts);
finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
spin_lock_bh(&sess->meas_lock);
processing_time = cmd->pre_exec_finish - cmd->start;
processing_time += finish - cmd->post_exec_start;
sess->processing_time += processing_time;
scst_time = cmd->pre_exec_finish - cmd->start;
scst_time += finish - cmd->post_exec_start;
proc_time = finish - cmd->start;
sess->scst_time += scst_time;
sess->processing_time += proc_time;
sess->processed_cmds++;
TRACE_DBG("cmd %p (sess %p): finish %Ld (tv_sec %ld, "
"tv_nsec %ld), processing_time %Ld", cmd, sess, finish,
ts.tv_sec, ts.tv_nsec, processing_time);
spin_unlock_bh(&sess->meas_lock);
TRACE_DBG("cmd %p (sess %p): finish %Ld (tv_sec %ld, "
"tv_nsec %ld), scst_time %Ld, proc_time %Ld", cmd, sess,
finish, ts.tv_sec, ts.tv_nsec, scst_time, proc_time);
}
#endif
TRACE_EXIT_HRES(res);
@@ -3389,9 +3395,9 @@ void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
* we must wait here to be sure that we won't receive
* double commands with the same tag.
*/
TRACE((mcmd->fn == SCST_ABORT_TASK) ? TRACE_MGMT_MINOR : TRACE_MGMT,
"cmd %p (tag %llu) being executed/xmitted (state %d), "
"deferring ABORT...", cmd, cmd->tag, cmd->state);
TRACE_MGMT_DBG("cmd %p (tag %llu) being executed/xmitted "
"(state %d), deferring ABORT...", cmd, cmd->tag,
cmd->state);
#ifdef EXTRACHECKS
if (cmd->mgmt_cmnd) {
printk(KERN_ALERT "cmd %p (tag %llu, state %d) "
@@ -3706,7 +3712,7 @@ static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
}
scst_cmd_get(cmd);
spin_unlock_irq(&sess->sess_list_lock);
TRACE(TRACE_MGMT_MINOR, "Cmd %p for tag %llu (sn %ld, set %d, "
TRACE_MGMT_DBG("Cmd %p for tag %llu (sn %ld, set %d, "
"queue_type %x) found, aborting it", cmd, mcmd->tag,
cmd->sn, cmd->sn_set, cmd->queue_type);
mcmd->cmd_to_abort = cmd;
@@ -4419,8 +4425,10 @@ int scst_rx_mgmt_fn(struct scst_session *sess,
mcmd->cmd_sn = params->cmd_sn;
TRACE((params->fn == SCST_ABORT_TASK) ? TRACE_MGMT_MINOR : TRACE_MGMT,
"sess=%p, fn %x, tag_set %d, tag %Ld, lun_set %d, "
"lun=%Ld, cmd_sn_set %d, cmd_sn %d", sess, params->fn,
"TM fn %x", params->fn);
TRACE_MGMT_DBG("sess=%p, tag_set %d, tag %Ld, lun_set %d, "
"lun=%Ld, cmd_sn_set %d, cmd_sn %d", sess,
params->tag_set, params->tag, params->lun_set,
(uint64_t)mcmd->lun, params->cmd_sn_set, params->cmd_sn);