- Implemented aborting, on timeout, of commands stuck in the target hardware

- Cleanups



git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@979 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2009-07-27 16:59:49 +00:00
parent db5fbe625c
commit eb144f2e6f
6 changed files with 345 additions and 106 deletions

View File

@@ -29,10 +29,6 @@ To be done
- Investigate possible missed emulated UA cases.
- More target drivers errors handling (timeouts for commands, sent
to target drivers, e.g. via xmit_response(), etc.) with some kind
of error recovery.
- Additional ability for target drivers to ask for command's retry also
after xmit_response() and rdy_to_xfer() returned (for example, if a
command was successfully sent to the target card, but later it was

View File

@@ -398,6 +398,13 @@ enum scst_exec_context {
/* Set if session is shutting down */
#define SCST_SESS_SPH_SHUTDOWN 1
/*************************************************************
** Session's async (atomic) flags
*************************************************************/
/* Set if the sess's hw pending work is scheduled */
#define SCST_SESS_HW_PENDING_WORK_SCHEDULED 0
/*************************************************************
** Cmd's async (atomic) flags
*************************************************************/
@@ -565,6 +572,16 @@ struct scst_tgt_template {
/* True, if the template doesn't need the entry in /proc */
unsigned no_proc_entry:1;
/*
* The maximum time in seconds cmd can stay inside the target
* hardware, i.e. after rdy_to_xfer() and xmit_response(), before
* on_hw_pending_cmd_timeout() will be called, if defined.
*
* In the current implementation a cmd will be aborted in time t
* max_hw_pending_time <= t < 2*max_hw_pending_time.
*/
int max_hw_pending_time;
/*
* This function is equivalent to the SCSI
* queuecommand. The target should transmit the response
@@ -606,6 +623,16 @@ struct scst_tgt_template {
*/
int (*rdy_to_xfer) (struct scst_cmd *cmd);
/*
Called if cmd stays inside the target hardware, i.e. after
rdy_to_xfer() and xmit_response(), for more than max_hw_pending_time.
The target driver is supposed to clean up this command and
resume the cmd's processing.
*
* OPTIONAL
*/
void (*on_hw_pending_cmd_timeout) (struct scst_cmd *cmd);
/*
* Called to notify the driver that the command is about to be freed.
* Necessary, because for aborted commands xmit_response() could not
@@ -964,16 +991,17 @@ struct scst_tgt {
void *tgt_priv;
/*
* The following fields used to store and retry cmds if
* target's internal queue is full, so the target is unable to accept
* the cmd returning QUEUE FULL
The following fields are used to store and retry cmds if the target's
internal queue is full, so the target is unable to accept
the cmd, returning QUEUE FULL.
They are protected by tgt_lock, where necessary.
*/
bool retry_timer_active;
struct timer_list retry_timer;
atomic_t finished_cmds;
int retry_cmds; /* protected by tgt_lock */
int retry_cmds;
spinlock_t tgt_lock;
struct list_head retry_cmd_list; /* protected by tgt_lock */
struct list_head retry_cmd_list;
/* Used to wait until session finished to unregister */
wait_queue_head_t unreg_waitQ;
@@ -987,7 +1015,7 @@ struct scst_tgt {
/* Hash size and hash fn for hash based lun translation */
#define TGT_DEV_HASH_SHIFT 5
#define TGT_DEV_HASH_SIZE (1<<TGT_DEV_HASH_SHIFT)
#define TGT_DEV_HASH_SIZE (1 << TGT_DEV_HASH_SHIFT)
#define HASH_VAL(_val) (_val & (TGT_DEV_HASH_SIZE - 1))
struct scst_session {
@@ -1002,6 +1030,8 @@ struct scst_session {
/* Used for storage of target driver private stuff */
void *tgt_priv;
unsigned long sess_aflags; /* session's async flags */
/*
* Hash list of tgt_dev's for this session, protected by scst_mutex
* and suspended activity
@@ -1014,6 +1044,15 @@ struct scst_session {
*/
struct list_head search_cmd_list;
spinlock_t sess_list_lock; /* protects search_cmd_list, etc */
/*
List of cmds in this session in the state after PRE_XMIT_RESP. All the cmds
* moved here from search_cmd_list. Needed for hw_pending_work.
* Protected by sess_list_lock.
*/
struct list_head after_pre_xmit_cmd_list;
atomic_t refcnt; /* get/put counter */
/*
@@ -1022,14 +1061,18 @@ struct scst_session {
*/
atomic_t sess_cmd_count;
spinlock_t sess_list_lock; /* protects search_cmd_list, etc */
/* Access control for this session and list entry there */
struct scst_acg *acg;
/* List entry for the sessions list inside ACG */
struct list_head acg_sess_list_entry;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
struct delayed_work hw_pending_work;
#else
struct work_struct hw_pending_work;
#endif
/* Name of attached initiator */
const char *initiator_name;
@@ -1045,7 +1088,7 @@ struct scst_session {
struct list_head sess_shut_list_entry;
/*
* Lists of deffered during session initialization commands.
* Lists of deferred during session initialization commands.
* Protected by sess_list_lock.
*/
struct list_head init_deferred_cmd_list;
@@ -1138,6 +1181,9 @@ struct scst_cmd {
/* Set if scst_dec_on_dev_cmd() call is needed on the cmd's finish */
unsigned int dec_on_dev_needed:1;
/* Set if cmd is queued as hw pending */
unsigned int cmd_hw_pending:1;
/*
* Set if the target driver wants to alloc data buffers on its own.
* In this case alloc_data_buf() must be provided in the target driver
@@ -1157,18 +1203,6 @@ struct scst_cmd {
/* Set if the target driver called scst_set_expected() */
unsigned int expected_values_set:1;
/*
* Set if the cmd was delayed by task management debugging code.
* Used only if CONFIG_SCST_DEBUG_TM is on.
*/
unsigned int tm_dbg_delayed:1;
/*
* Set if the cmd must be ignored by task management debugging code.
* Used only if CONFIG_SCST_DEBUG_TM is on.
*/
unsigned int tm_dbg_immut:1;
/*
* Set if the SG buffer was modified by scst_set_resp_data_len()
*/
@@ -1217,6 +1251,18 @@ struct scst_cmd {
/* Set if cmd is finished */
unsigned int finished:1;
/*
* Set if the cmd was delayed by task management debugging code.
* Used only if CONFIG_SCST_DEBUG_TM is on.
*/
unsigned int tm_dbg_delayed:1;
/*
* Set if the cmd must be ignored by task management debugging code.
* Used only if CONFIG_SCST_DEBUG_TM is on.
*/
unsigned int tm_dbg_immut:1;
/**************************************************************/
unsigned long cmd_flags; /* cmd's async flags */
@@ -1247,8 +1293,8 @@ struct scst_cmd {
/* The corresponding sn_slot in tgt_dev->sn_slots */
atomic_t *sn_slot;
/* List entry for session's search_cmd_list */
struct list_head search_cmd_list_entry;
/* List entry for sess's search_cmd_list and after_pre_xmit_cmd_list */
struct list_head sess_cmd_list_entry;
/*
Used to find the cmd by scst_find_cmd_by_tag(). Set by the
@@ -1337,6 +1383,9 @@ struct scst_cmd {
uint8_t *sense; /* pointer to sense buffer */
unsigned short sense_bufflen; /* length of the sense buffer, if any */
/* Start time when cmd was sent to rdy_to_xfer() or xmit_response() */
unsigned long hw_pending_start;
/* Used for storage of target driver private stuff */
void *tgt_priv;
@@ -2820,6 +2869,21 @@ static inline int scst_get_in_buf_count(struct scst_cmd *cmd)
return (cmd->in_sg_cnt == 0) ? 1 : cmd->in_sg_cnt;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
/*
 * Compatibility shim for kernels that do not provide
 * cancel_delayed_work_sync(): cancel the delayed work, then flush the
 * shared workqueue so that an instance which already started running
 * has completed before we return.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
static inline int cancel_delayed_work_sync(struct delayed_work *work)
#else
static inline int cancel_delayed_work_sync(struct work_struct *work)
#endif
{
	int cancelled = cancel_delayed_work(work);

	flush_scheduled_work();
	return cancelled;
}
#endif
/*
* Suspends and resumes any activity.
* Function scst_suspend_activity() doesn't return 0, until there are any

View File

@@ -809,6 +809,174 @@ out:
}
EXPORT_SYMBOL(scst_set_resp_data_len);
/*
 * Queues a cmd on its target's retry list after the target returned
 * QUEUE FULL, arming the retry timer if it is not already active.
 *
 * No locks may be held by the caller; tgt->tgt_lock is taken internally.
 *
 * Returns 0 if the cmd was queued for a delayed retry, or -1 if at
 * least one cmd finished since @finished_cmds was sampled, in which
 * case the caller should retry the cmd directly instead.
 */
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->tgt;
	int res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	tgt->retry_cmds++;
	/*
	 * Memory barrier is needed here, because we need the exact order
	 * between the read and write between retry_cmds and finished_cmds to
	 * not miss the case when a command finished while we were queuing it
	 * for retry after the finished_cmds check.
	 */
	smp_mb();
	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE_RETRY("Some command(s) finished, direct retry "
		      "(finished_cmds=%d, tgt->finished_cmds=%d, "
		      "retry_cmds=%d)", finished_cmds,
		      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		/* Arm the one-shot retry timer only once per burst */
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Checks whether a single cmd has been stuck inside the target hardware
 * for longer than @max_time and, if so, hands it to the target driver's
 * on_hw_pending_cmd_timeout() callback for cleanup.
 *
 * Called with sess->sess_list_lock held (irqsave state in *flags). The
 * lock is dropped around the driver callback and reacquired afterwards,
 * so the list being walked may have changed — hence the restart result.
 *
 * Returns 0 to continue, >0 to restart, <0 to break
 */
static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
	unsigned long cur_time, unsigned long max_time,
	struct scst_session *sess, unsigned long *flags,
	struct scst_tgt_template *tgtt)
{
	int res = -1; /* break */

	TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
		"pending time %ld", cmd, cmd->cmd_hw_pending,
		(long)(cur_time - cmd->start_time) / HZ,
		(long)(cur_time - cmd->hw_pending_start) / HZ);

	if (time_before_eq(cur_time, cmd->start_time + max_time)) {
		/* Cmds are ordered, so no need to check more */
		goto out;
	}

	if (!cmd->cmd_hw_pending) {
		/* Not inside the target hardware right now, skip it */
		res = 0; /* continue */
		goto out;
	}

	if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
		/* Cmds are ordered, so no need to check more */
		goto out;
	}

	TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
		cmd, (cur_time - cmd->hw_pending_start) / HZ,
		cmd->state);

	/* Clear the flag first so the timeout is reported only once */
	cmd->cmd_hw_pending = 0;

	/* Drop the lock around the driver callback and take it back after */
	spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
	tgtt->on_hw_pending_cmd_timeout(cmd);
	spin_lock_irqsave(&sess->sess_list_lock, *flags);

	res = 1; /* restart */

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Delayed work handler that scans all not yet finished cmds of a session
 * and, via scst_check_hw_pending_cmd(), invokes the target template's
 * on_hw_pending_cmd_timeout() for those that stayed inside the target
 * hardware longer than tgtt->max_hw_pending_time seconds.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void scst_hw_pending_work_fn(void *p)
#else
static void scst_hw_pending_work_fn(struct delayed_work *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct scst_session *sess = (struct scst_session *)p;
#else
	struct scst_session *sess = container_of(work, struct scst_session,
					hw_pending_work);
#endif
	struct scst_tgt_template *tgtt = sess->tgt->tgtt;
	struct scst_cmd *cmd;
	unsigned long cur_time = jiffies;
	unsigned long flags;
	unsigned long max_time = tgtt->max_hw_pending_time * HZ;

	TRACE_ENTRY();

	TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);

	/*
	 * Clear the "scheduled" flag before scanning, so the work can be
	 * rearmed for cmds becoming HW pending during or after the scan.
	 */
	clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

restart:
	/* Cmds that have not yet passed PRE_XMIT_RESP */
	list_for_each_entry(cmd, &sess->search_cmd_list,
				sess_cmd_list_entry) {
		int rc;

		rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
					&flags, tgtt);
		if (rc < 0)
			break;
		else if (rc == 0)
			continue;
		else
			/* Lock was dropped; the list may have changed */
			goto restart;
	}

restart1:
	/* Cmds already moved past PRE_XMIT_RESP */
	list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
				sess_cmd_list_entry) {
		int rc;

		rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
					&flags, tgtt);
		if (rc < 0)
			break;
		else if (rc == 0)
			continue;
		else
			/* Lock was dropped; the list may have changed */
			goto restart1;
	}

	if (!list_empty(&sess->search_cmd_list) ||
	    !list_empty(&sess->after_pre_xmit_cmd_list)) {
		/*
		 * For stuck cmds, if there is no activity we might need to
		 * have one more run to release them, so reschedule once again.
		 */
		TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
			sess, tgtt->max_hw_pending_time);
		set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
		schedule_delayed_work(&sess->hw_pending_work,
				tgtt->max_hw_pending_time * HZ);
	}

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);

	TRACE_EXIT();
	return;
}
/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
@@ -1860,9 +2028,16 @@ struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
}
spin_lock_init(&sess->sess_list_lock);
INIT_LIST_HEAD(&sess->search_cmd_list);
INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
sess->tgt = tgt;
INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
INIT_DELAYED_WORK(&sess->hw_pending_work,
(void (*)(struct work_struct *))scst_hw_pending_work_fn);
#else
INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
#endif
#ifdef CONFIG_SCST_MEASURE_LATENCY
spin_lock_init(&sess->meas_lock);
@@ -1920,6 +2095,8 @@ void scst_free_session_callback(struct scst_session *sess)
TRACE_DBG("Freeing session %p", sess);
cancel_delayed_work_sync(&sess->hw_pending_work);
c = sess->shutdown_compl;
if (sess->unreg_done_fn) {
@@ -4054,7 +4231,7 @@ void scst_process_reset(struct scst_device *dev,
TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
list_for_each_entry(cmd, &sess->search_cmd_list,
search_cmd_list_entry) {
sess_cmd_list_entry) {
if (cmd == exclude_cmd)
continue;
if ((cmd->tgt_dev == tgt_dev) ||

View File

@@ -1410,7 +1410,7 @@ void sgv_pool_deinit(struct sgv_pool *pool)
TRACE_ENTRY();
cancel_delayed_work(&pool->sgv_purge_work);
cancel_delayed_work_sync(&pool->sgv_purge_work);
sgv_pool_flush(pool);

View File

@@ -284,6 +284,8 @@ int scst_global_mgmt_thread(void *arg);
int scst_add_dev_threads(struct scst_device *dev, int num);
void scst_del_dev_threads(struct scst_device *dev, int num);
int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds);
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev);
void scst_free_device(struct scst_device *dev);

View File

@@ -236,8 +236,8 @@ void scst_cmd_init_done(struct scst_cmd *cmd,
* TM processing. This check is needed because there might be
* old, i.e. deferred, commands and new, i.e. just coming, ones.
*/
if (cmd->search_cmd_list_entry.next == NULL)
list_add_tail(&cmd->search_cmd_list_entry,
if (cmd->sess_cmd_list_entry.next == NULL)
list_add_tail(&cmd->sess_cmd_list_entry,
&sess->search_cmd_list);
switch (sess->init_phase) {
case SCST_SESS_IPH_SUCCESS:
@@ -258,7 +258,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd,
sBUG();
}
} else
list_add_tail(&cmd->search_cmd_list_entry,
list_add_tail(&cmd->sess_cmd_list_entry,
&sess->search_cmd_list);
spin_unlock_irqrestore(&sess->sess_list_lock, flags);
@@ -889,56 +889,10 @@ void scst_restart_cmd(struct scst_cmd *cmd, int status,
}
EXPORT_SYMBOL(scst_restart_cmd);
/*
 * Queues a cmd on its target's retry list after the target returned
 * QUEUE FULL, arming the retry timer if it is not already active.
 *
 * No locks may be held by the caller; tgt->tgt_lock is taken internally.
 *
 * Returns 0 if the cmd was queued for a delayed retry, or -1 if at
 * least one cmd finished since @finished_cmds was sampled, in which
 * case the caller should retry the cmd directly instead.
 */
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->sess->tgt;
	int res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	tgt->retry_cmds++;
	/*
	 * Memory barrier is needed here, because we need the exact order
	 * between the read and write between retry_cmds and finished_cmds to
	 * not miss the case when a command finished while we were queuing it
	 * for retry after the finished_cmds check.
	 */
	smp_mb();
	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE_RETRY("Some command(s) finished, direct retry "
		      "(finished_cmds=%d, tgt->finished_cmds=%d, "
		      "retry_cmds=%d)", finished_cmds,
		      atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		/* Arm the one-shot retry timer only once per burst */
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	TRACE_EXIT_RES(res);
	return res;
}
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
int res, rc;
struct scst_tgt_template *tgtt = cmd->tgtt;
TRACE_ENTRY();
@@ -947,41 +901,58 @@ static int scst_rdy_to_xfer(struct scst_cmd *cmd)
goto out_dev_done;
}
if ((cmd->tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
if (unlikely(!cmd->tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
/*
* It shouldn't be because of SCST_TGT_DEV_AFTER_*
* optimization.
*/
TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
"context, rescheduling", cmd->tgtt->name);
"context, rescheduling", tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
}
while (1) {
int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_DATA_WAIT;
if (tgtt->on_hw_pending_cmd_timeout != NULL) {
struct scst_session *sess = cmd->sess;
cmd->hw_pending_start = jiffies;
cmd->cmd_hw_pending = 1;
if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
TRACE_DBG("Sched HW pending work for sess %p "
"(max time %d)", sess,
tgtt->max_hw_pending_time);
set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
&sess->sess_aflags);
schedule_delayed_work(&sess->hw_pending_work,
tgtt->max_hw_pending_time * HZ);
}
}
TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
if (((scst_random() % 100) == 75))
rc = SCST_TGT_RES_QUEUE_FULL;
else
#endif
rc = cmd->tgtt->rdy_to_xfer(cmd);
rc = tgtt->rdy_to_xfer(cmd);
TRACE_DBG("rdy_to_xfer() returned %d", rc);
if (likely(rc == SCST_TGT_RES_SUCCESS))
goto out;
cmd->cmd_hw_pending = 0;
/* Restore the previous state */
cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
@@ -995,7 +966,7 @@ static int scst_rdy_to_xfer(struct scst_cmd *cmd)
case SCST_TGT_RES_NEED_THREAD_CTX:
TRACE_DBG("Target driver %s "
"rdy_to_xfer() requested thread "
"context, rescheduling", cmd->tgtt->name);
"context, rescheduling", tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
break;
@@ -1012,10 +983,10 @@ out:
out_error_rc:
if (rc == SCST_TGT_RES_FATAL_ERROR) {
PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
"fatal error", cmd->tgtt->name);
"fatal error", tgtt->name);
} else {
PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
"value %d", cmd->tgtt->name, rc);
"value %d", tgtt->name, rc);
}
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
@@ -1029,6 +1000,7 @@ out_dev_done:
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
enum scst_exec_context context, int check_retries)
{
struct scst_tgt *tgt = cmd->tgt;
unsigned long flags;
TRACE_ENTRY();
@@ -1046,7 +1018,7 @@ static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
case SCST_CONTEXT_DIRECT:
if (check_retries)
scst_check_retries(cmd->tgt);
scst_check_retries(tgt);
scst_process_active_cmd(cmd, false);
break;
@@ -1056,7 +1028,7 @@ static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
/* go through */
case SCST_CONTEXT_THREAD:
if (check_retries)
scst_check_retries(cmd->tgt);
scst_check_retries(tgt);
spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
TRACE_DBG("Adding cmd %p to active cmd list", cmd);
if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
@@ -1071,7 +1043,7 @@ static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
case SCST_CONTEXT_TASKLET:
if (check_retries)
scst_check_retries(cmd->tgt);
scst_check_retries(tgt);
scst_schedule_tasklet(cmd);
break;
}
@@ -1088,6 +1060,8 @@ void scst_rx_data(struct scst_cmd *cmd, int status,
TRACE_DBG("Preferred context: %d", pref_context);
TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
cmd->cmd_hw_pending = 0;
#ifdef CONFIG_SCST_EXTRACHECKS
if ((in_irq() || irqs_disabled()) &&
((pref_context == SCST_CONTEXT_DIRECT) ||
@@ -2885,6 +2859,7 @@ out:
static int scst_pre_xmit_response(struct scst_cmd *cmd)
{
int res;
struct scst_session *sess = cmd->sess;
TRACE_ENTRY();
@@ -2931,9 +2906,10 @@ static int scst_pre_xmit_response(struct scst_cmd *cmd)
* initiator sends cmd with the same tag => it is possible that
* a wrong cmd will be found by find() functions.
*/
spin_lock_irq(&cmd->sess->sess_list_lock);
list_del(&cmd->search_cmd_list_entry);
spin_unlock_irq(&cmd->sess->sess_list_lock);
spin_lock_irq(&sess->sess_list_lock);
list_move_tail(&cmd->sess_cmd_list_entry,
&sess->after_pre_xmit_cmd_list);
spin_unlock_irq(&sess->sess_list_lock);
cmd->done = 1;
smp_mb(); /* to sync with scst_abort_cmd() */
@@ -2964,7 +2940,6 @@ out:
{
struct timespec ts;
uint64_t finish, scst_time, proc_time;
struct scst_session *sess = cmd->sess;
getnstimeofday(&ts);
finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
@@ -2993,26 +2968,27 @@ out:
static int scst_xmit_response(struct scst_cmd *cmd)
{
struct scst_tgt_template *tgtt = cmd->tgtt;
int res, rc;
TRACE_ENTRY();
EXTRACHECKS_BUG_ON(cmd->internal);
if (unlikely(!cmd->tgtt->xmit_response_atomic &&
if (unlikely(!tgtt->xmit_response_atomic &&
scst_cmd_atomic(cmd))) {
/*
* It shouldn't be because of SCST_TGT_DEV_AFTER_*
* optimization.
*/
TRACE_DBG("Target driver %s xmit_response() needs thread "
"context, rescheduling", cmd->tgtt->name);
"context, rescheduling", tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
}
while (1) {
int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_XMIT_WAIT;
@@ -3041,17 +3017,34 @@ static int scst_xmit_response(struct scst_cmd *cmd)
}
#endif
if (tgtt->on_hw_pending_cmd_timeout != NULL) {
struct scst_session *sess = cmd->sess;
cmd->hw_pending_start = jiffies;
cmd->cmd_hw_pending = 1;
if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
TRACE_DBG("Sched HW pending work for sess %p "
"(max time %d)", sess,
tgtt->max_hw_pending_time);
set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
&sess->sess_aflags);
schedule_delayed_work(&sess->hw_pending_work,
tgtt->max_hw_pending_time * HZ);
}
}
#ifdef CONFIG_SCST_DEBUG_RETRY
if (((scst_random() % 100) == 77))
rc = SCST_TGT_RES_QUEUE_FULL;
else
#endif
rc = cmd->tgtt->xmit_response(cmd);
rc = tgtt->xmit_response(cmd);
TRACE_DBG("xmit_response() returned %d", rc);
if (likely(rc == SCST_TGT_RES_SUCCESS))
goto out;
cmd->cmd_hw_pending = 0;
/* Restore the previous state */
cmd->state = SCST_CMD_STATE_XMIT_RESP;
@@ -3065,7 +3058,7 @@ static int scst_xmit_response(struct scst_cmd *cmd)
case SCST_TGT_RES_NEED_THREAD_CTX:
TRACE_DBG("Target driver %s xmit_response() "
"requested thread context, rescheduling",
cmd->tgtt->name);
tgtt->name);
res = SCST_CMD_STATE_RES_NEED_THREAD;
break;
@@ -3083,10 +3076,10 @@ out:
out_error:
if (rc == SCST_TGT_RES_FATAL_ERROR) {
PRINT_ERROR("Target driver %s xmit_response() returned "
"fatal error", cmd->tgtt->name);
"fatal error", tgtt->name);
} else {
PRINT_ERROR("Target driver %s xmit_response() returned "
"invalid value %d", cmd->tgtt->name, rc);
"invalid value %d", tgtt->name, rc);
}
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_FINISHED;
@@ -3101,6 +3094,8 @@ void scst_tgt_cmd_done(struct scst_cmd *cmd,
sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
cmd->cmd_hw_pending = 0;
cmd->state = SCST_CMD_STATE_FINISHED;
scst_proccess_redirect_cmd(cmd, pref_context, 1);
@@ -3112,10 +3107,15 @@ EXPORT_SYMBOL(scst_tgt_cmd_done);
static int scst_finish_cmd(struct scst_cmd *cmd)
{
int res;
struct scst_session *sess = cmd->sess;
TRACE_ENTRY();
atomic_dec(&cmd->sess->sess_cmd_count);
atomic_dec(&sess->sess_cmd_count);
spin_lock_irq(&sess->sess_list_lock);
list_del(&cmd->sess_cmd_list_entry);
spin_unlock_irq(&sess->sess_list_lock);
cmd->finished = 1;
smp_mb(); /* to sync with scst_abort_cmd() */
@@ -4274,7 +4274,7 @@ static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
list_for_each_entry(cmd, &sess->search_cmd_list,
search_cmd_list_entry) {
sess_cmd_list_entry) {
if ((cmd->tgt_dev == tgt_dev) ||
((cmd->tgt_dev == NULL) &&
(cmd->lun == tgt_dev->lun))) {
@@ -4374,7 +4374,7 @@ static int scst_clear_task_set(struct scst_mgmt_cmd *mcmd)
TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
list_for_each_entry(cmd, &sess->search_cmd_list,
search_cmd_list_entry) {
sess_cmd_list_entry) {
if ((cmd->dev == dev) ||
((cmd->dev == NULL) &&
scst_is_cmd_belongs_to_dev(cmd, dev))) {
@@ -5678,7 +5678,7 @@ static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
TRACE_DBG("%s (sess=%p, tag=%llu)", "Searching in search cmd list",
sess, (long long unsigned int)tag);
list_for_each_entry(cmd, &sess->search_cmd_list,
search_cmd_list_entry) {
sess_cmd_list_entry) {
if (cmd->tag == tag)
goto out;
}
@@ -5704,7 +5704,7 @@ struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
list_for_each_entry(cmd, &sess->search_cmd_list,
search_cmd_list_entry) {
sess_cmd_list_entry) {
if (cmp_fn(cmd, data))
goto out_unlock;
}