Remove tgt->finished_cmds logic

1. It does not work well with NPIV-like targets.
2. Without it, struct scst_tgt is read-mostly.



git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@4811 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2013-03-25 23:40:44 +00:00
parent 82f7d66e5e
commit f9d666ace2
5 changed files with 129 additions and 166 deletions

View File

@@ -1544,7 +1544,6 @@ struct scst_tgt {
*/
bool retry_timer_active;
struct timer_list retry_timer;
atomic_t finished_cmds;
int retry_cmds;
spinlock_t tgt_lock;
struct list_head retry_cmd_list;

View File

@@ -3034,50 +3034,31 @@ out:
EXPORT_SYMBOL(__scst_get_resid);
/* No locks */
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
void scst_queue_retry_cmd(struct scst_cmd *cmd)
{
struct scst_tgt *tgt = cmd->tgt;
int res = 0;
unsigned long flags;
TRACE_ENTRY();
spin_lock_irqsave(&tgt->tgt_lock, flags);
tgt->retry_cmds++;
/*
* Memory barrier is needed here, because we need the exact order
* between the read and write between retry_cmds and finished_cmds to
* not miss the case when a command finished while we queueing it for
* retry after the finished_cmds check.
*/
smp_mb();
TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
tgt->retry_cmds);
if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
/* At least one cmd finished, so try again */
tgt->retry_cmds--;
TRACE_RETRY("Some command(s) finished, direct retry "
"(finished_cmds=%d, tgt->finished_cmds=%d, "
"retry_cmds=%d)", finished_cmds,
atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
res = -1;
goto out_unlock_tgt;
}
TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
if (!tgt->retry_timer_active) {
TRACE_DBG("Activating retry timer for tgt %p", tgt);
tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
add_timer(&tgt->retry_timer);
tgt->retry_timer_active = 1;
}
out_unlock_tgt:
spin_unlock_irqrestore(&tgt->tgt_lock, flags);
TRACE_EXIT_RES(res);
return res;
TRACE_EXIT();
return;
}
/**
@@ -3310,7 +3291,6 @@ int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt)
t->sg_tablesize = tgtt->sg_tablesize;
spin_lock_init(&t->tgt_lock);
INIT_LIST_HEAD(&t->retry_cmd_list);
atomic_set(&t->finished_cmds, 0);
init_timer(&t->retry_timer);
t->retry_timer.data = (unsigned long)t;
t->retry_timer.function = scst_tgt_retry_timer_fn;
@@ -5489,13 +5469,6 @@ void scst_check_retries(struct scst_tgt *tgt)
TRACE_ENTRY();
/*
* We don't worry about overflow of finished_cmds, because we check
* only for its change.
*/
atomic_inc(&tgt->finished_cmds);
/* See comment in scst_queue_retry_cmd() */
smp_mb__after_atomic_inc();
if (unlikely(tgt->retry_cmds > 0)) {
struct scst_cmd *c, *tc;
unsigned long flags;
@@ -5541,6 +5514,15 @@ static void scst_tgt_retry_timer_fn(unsigned long arg)
scst_check_retries(tgt);
spin_lock_irqsave(&tgt->tgt_lock, flags);
if ((tgt->retry_cmds > 0) && !tgt->retry_timer_active) {
TRACE_DBG("Reactivating retry timer for tgt %p", tgt);
tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
add_timer(&tgt->retry_timer);
tgt->retry_timer_active = 1;
}
spin_unlock_irqrestore(&tgt->tgt_lock, flags);
TRACE_EXIT();
return;
}

View File

@@ -2306,10 +2306,9 @@ static int __init init_scst(void)
INIT_CACHEP_ALIGN(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
INIT_CACHEP_ALIGN(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
INIT_CACHEP_ALIGN(scst_dev_cachep, scst_device, out_destroy_sess_cache);
INIT_CACHEP_ALIGN(scst_tgt_cachep, scst_tgt, out_destroy_dev_cache);
/* They are read-mostly */
INIT_CACHEP(scst_tgt_cachep, scst_tgt, out_destroy_dev_cache); /* read-mostly */
INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_tgt_cache);
INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgtd_cache);
INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgtd_cache); /* read-mostly */
scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
mempool_free_slab, scst_mgmt_cachep);

View File

@@ -109,7 +109,7 @@ extern unsigned long scst_trace_flag;
#define SCST_MAX_DEV_COMMANDS 256
#endif
#define SCST_TGT_RETRY_TIMEOUT (3/2*HZ)
#define SCST_TGT_RETRY_TIMEOUT 1 /* 1 jiffy */
#define SCST_DEF_LBA_DATA_LEN -1
@@ -304,7 +304,7 @@ void scst_zero_write_rest(struct scst_cmd *cmd);
void scst_limit_sg_write_len(struct scst_cmd *cmd);
void scst_adjust_resp_data_len(struct scst_cmd *cmd);
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds);
void scst_queue_retry_cmd(struct scst_cmd *cmd);
int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt);
void scst_free_tgt(struct scst_tgt *tgt);

View File

@@ -1333,66 +1333,59 @@ static int scst_rdy_to_xfer(struct scst_cmd *cmd)
goto out;
}
while (1) {
int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_DATA_WAIT;
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_DATA_WAIT;
if (tgtt->on_hw_pending_cmd_timeout != NULL) {
struct scst_session *sess = cmd->sess;
cmd->hw_pending_start = jiffies;
cmd->cmd_hw_pending = 1;
if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
TRACE_DBG("Sched HW pending work for sess %p "
"(max time %d)", sess,
tgtt->max_hw_pending_time);
set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
&sess->sess_aflags);
schedule_delayed_work(&sess->hw_pending_work,
tgtt->max_hw_pending_time * HZ);
}
if (tgtt->on_hw_pending_cmd_timeout != NULL) {
struct scst_session *sess = cmd->sess;
cmd->hw_pending_start = jiffies;
cmd->cmd_hw_pending = 1;
if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
TRACE_DBG("Sched HW pending work for sess %p "
"(max time %d)", sess,
tgtt->max_hw_pending_time);
set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
&sess->sess_aflags);
schedule_delayed_work(&sess->hw_pending_work,
tgtt->max_hw_pending_time * HZ);
}
}
scst_set_cur_start(cmd);
scst_set_cur_start(cmd);
TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
if (((scst_random() % 100) == 75))
rc = SCST_TGT_RES_QUEUE_FULL;
else
if (((scst_random() % 100) == 75))
rc = SCST_TGT_RES_QUEUE_FULL;
else
#endif
rc = tgtt->rdy_to_xfer(cmd);
TRACE_DBG("rdy_to_xfer() returned %d", rc);
rc = tgtt->rdy_to_xfer(cmd);
TRACE_DBG("rdy_to_xfer() returned %d", rc);
if (likely(rc == SCST_TGT_RES_SUCCESS))
goto out;
if (likely(rc == SCST_TGT_RES_SUCCESS))
goto out;
scst_set_rdy_to_xfer_time(cmd);
scst_set_rdy_to_xfer_time(cmd);
cmd->cmd_hw_pending = 0;
cmd->cmd_hw_pending = 0;
/* Restore the previous state */
cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
/* Restore the previous state */
cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
switch (rc) {
case SCST_TGT_RES_QUEUE_FULL:
if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
break;
else
continue;
switch (rc) {
case SCST_TGT_RES_QUEUE_FULL:
scst_queue_retry_cmd(cmd);
goto out;
case SCST_TGT_RES_NEED_THREAD_CTX:
TRACE_DBG("Target driver %s "
"rdy_to_xfer() requested thread "
"context, rescheduling", tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
case SCST_TGT_RES_NEED_THREAD_CTX:
TRACE_DBG("Target driver %s "
"rdy_to_xfer() requested thread "
"context, rescheduling", tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
default:
goto out_error_rc;
}
break;
default:
goto out_error_rc;
}
out:
@@ -3596,113 +3589,103 @@ static int scst_xmit_response(struct scst_cmd *cmd)
goto out;
}
while (1) {
int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_XMIT_WAIT;
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_XMIT_WAIT;
TRACE_DBG("Calling xmit_response(%p)", cmd);
TRACE_DBG("Calling xmit_response(%p)", cmd);
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
if (unlikely(trace_flag & TRACE_DATA_SEND) &&
(cmd->data_direction & SCST_DATA_READ)) {
int i, sg_cnt;
struct scatterlist *sg, *sgi;
if (cmd->tgt_i_sg != NULL) {
sg = cmd->tgt_i_sg;
sg_cnt = cmd->tgt_i_sg_cnt;
} else {
sg = cmd->sg;
sg_cnt = cmd->sg_cnt;
}
if (sg != NULL) {
PRINT_INFO("Xmitting data for cmd %p "
"(sg_cnt %d, sg %p, sg[0].page %p, buf %p, "
"resp len %d)", cmd, sg_cnt, sg,
(void *)sg_page(&sg[0]), sg_virt(sg),
cmd->resp_data_len);
for_each_sg(sg, sgi, sg_cnt, i) {
PRINT_INFO("sg %d", i);
PRINT_BUFFER("data", sg_virt(sgi),
sgi->length);
}
if (unlikely(trace_flag & TRACE_DATA_SEND) &&
(cmd->data_direction & SCST_DATA_READ)) {
int i, sg_cnt;
struct scatterlist *sg, *sgi;
if (cmd->tgt_i_sg != NULL) {
sg = cmd->tgt_i_sg;
sg_cnt = cmd->tgt_i_sg_cnt;
} else {
sg = cmd->sg;
sg_cnt = cmd->sg_cnt;
}
if (sg != NULL) {
PRINT_INFO("Xmitting data for cmd %p "
"(sg_cnt %d, sg %p, sg[0].page %p, buf %p, "
"resp len %d)", cmd, sg_cnt, sg,
(void *)sg_page(&sg[0]), sg_virt(sg),
cmd->resp_data_len);
for_each_sg(sg, sgi, sg_cnt, i) {
PRINT_INFO("sg %d", i);
PRINT_BUFFER("data", sg_virt(sgi),
sgi->length);
}
}
}
#endif
if (tgtt->on_hw_pending_cmd_timeout != NULL) {
struct scst_session *sess = cmd->sess;
cmd->hw_pending_start = jiffies;
cmd->cmd_hw_pending = 1;
if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
TRACE_DBG("Sched HW pending work for sess %p "
"(max time %d)", sess,
tgtt->max_hw_pending_time);
set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
&sess->sess_aflags);
schedule_delayed_work(&sess->hw_pending_work,
tgtt->max_hw_pending_time * HZ);
}
if (tgtt->on_hw_pending_cmd_timeout != NULL) {
struct scst_session *sess = cmd->sess;
cmd->hw_pending_start = jiffies;
cmd->cmd_hw_pending = 1;
if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
TRACE_DBG("Sched HW pending work for sess %p "
"(max time %d)", sess,
tgtt->max_hw_pending_time);
set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
&sess->sess_aflags);
schedule_delayed_work(&sess->hw_pending_work,
tgtt->max_hw_pending_time * HZ);
}
}
scst_set_cur_start(cmd);
scst_set_cur_start(cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
if (((scst_random() % 100) == 77))
rc = SCST_TGT_RES_QUEUE_FULL;
else
if (((scst_random() % 100) == 77))
rc = SCST_TGT_RES_QUEUE_FULL;
else
#endif
rc = tgtt->xmit_response(cmd);
TRACE_DBG("xmit_response() returned %d", rc);
rc = tgtt->xmit_response(cmd);
TRACE_DBG("xmit_response() returned %d", rc);
if (likely(rc == SCST_TGT_RES_SUCCESS))
goto out;
if (likely(rc == SCST_TGT_RES_SUCCESS))
goto out;
scst_set_xmit_time(cmd);
scst_set_xmit_time(cmd);
cmd->cmd_hw_pending = 0;
cmd->cmd_hw_pending = 0;
/* Restore the previous state */
cmd->state = SCST_CMD_STATE_XMIT_RESP;
/* Restore the previous state */
cmd->state = SCST_CMD_STATE_XMIT_RESP;
switch (rc) {
case SCST_TGT_RES_QUEUE_FULL:
if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
break;
else
continue;
switch (rc) {
case SCST_TGT_RES_QUEUE_FULL:
scst_queue_retry_cmd(cmd);
goto out;
case SCST_TGT_RES_NEED_THREAD_CTX:
TRACE_DBG("Target driver %s xmit_response() "
"requested thread context, rescheduling",
tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
case SCST_TGT_RES_NEED_THREAD_CTX:
TRACE_DBG("Target driver %s xmit_response() "
"requested thread context, rescheduling",
tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
default:
goto out_error;
default:
if (rc == SCST_TGT_RES_FATAL_ERROR) {
PRINT_ERROR("Target driver %s xmit_response() returned "
"fatal error", tgtt->name);
} else {
PRINT_ERROR("Target driver %s xmit_response() returned "
"invalid value %d", tgtt->name, rc);
}
break;
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_FINISHED;
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
out:
/* Caution: cmd can be already dead here */
TRACE_EXIT_HRES(res);
return res;
out_error:
if (rc == SCST_TGT_RES_FATAL_ERROR) {
PRINT_ERROR("Target driver %s xmit_response() returned "
"fatal error", tgtt->name);
} else {
PRINT_ERROR("Target driver %s xmit_response() returned "
"invalid value %d", tgtt->name, rc);
}
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_FINISHED;
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
static void scst_find_free_slot(struct scst_order_data *order_data)