Simplify, fix and improve commands ordering
git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@4972 d57e44dd-8a1f-0410-8b47-8ef2f437770f
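Editor's note: this commit reworks how SCST serializes commands carrying SCSI task attributes. Consecutive SIMPLE commands share one serial number (SN) backed by a reference-counted slot, an ORDERED command acts as a barrier and takes a new, higher SN, and a command whose SN does not yet match expected_sn is parked on a deferred list until expected_sn catches up, at which point __scst_check_deferred_commands_locked() releases it. The short program below is only a toy illustration of that deferral idea, not SCST code; every toy_* name is invented for the example.

/* Toy model: commands execute only when their SN matches expected_sn;
 * otherwise they are parked on a deferred list and re-checked each time
 * expected_sn advances. */
#include <stdio.h>

struct toy_cmd {
	unsigned int sn;
	struct toy_cmd *next;   /* deferred list linkage */
};

static struct toy_cmd *deferred;        /* commands waiting for their SN */
static unsigned int expected_sn;

static void toy_try_exec(struct toy_cmd *cmd)
{
	if (cmd->sn == expected_sn) {
		printf("exec cmd sn %u\n", cmd->sn);
		expected_sn++;                  /* cmd completed */
		/* release a deferred command that now matches, if any */
		for (struct toy_cmd **pp = &deferred; *pp; pp = &(*pp)->next) {
			if ((*pp)->sn == expected_sn) {
				struct toy_cmd *c = *pp;
				*pp = c->next;
				toy_try_exec(c);
				break;
			}
		}
	} else {
		printf("defer cmd sn %u (expected %u)\n", cmd->sn, expected_sn);
		cmd->next = deferred;
		deferred = cmd;
	}
}

int main(void)
{
	struct toy_cmd c0 = { .sn = 0 }, c1 = { .sn = 1 }, c2 = { .sn = 2 };

	toy_try_exec(&c2);      /* arrives early, gets deferred */
	toy_try_exec(&c1);      /* also early, deferred */
	toy_try_exec(&c0);      /* matches expected_sn, unblocks 1 then 2 */
	return 0;
}

Running it shows sn 2 and sn 1 being deferred until sn 0 completes, after which they are released in order; this is the effect the deferred_cmd_list handling in the diff achieves for real commands, with locking and slot refcounting on top.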
@@ -533,6 +533,11 @@ enum scst_exec_context {
*/
#define SCST_CMD_DEVICE_TAS 4

#ifdef CONFIG_SCST_EXTRACHECKS
/* Set if scst_inc_expected_sn() passed for this cmd */
#define SCST_CMD_INC_EXPECTED_SN_PASSED 10
#endif

/*************************************************************
** Tgt_dev's async. flags (tgt_dev_flags)
*************************************************************/
@@ -1819,25 +1824,25 @@ struct scst_cmd_threads {
* Used to execute cmd's in order of arrival, honoring SCSI task attributes
*/
struct scst_order_data {
spinlock_t sn_lock;
/*
* Protected by sn_lock, except expected_sn, which is protected by
* itself. Curr_sn must have the same size as expected_sn to
* overflow simultaneously.
* All fields, when needed, protected by sn_lock. Curr_sn must have
* the same type as expected_sn to overflow simultaneously!
*/

struct list_head skipped_sn_list;
struct list_head deferred_cmd_list;

spinlock_t sn_lock;

int hq_cmd_count;

/* Set if the prev cmd was ORDERED */
bool prev_cmd_ordered;

int def_cmd_count;
unsigned int expected_sn;
unsigned int curr_sn;
atomic_t sn_cmd_count;
int hq_cmd_count;
struct list_head deferred_cmd_list;
struct list_head skipped_sn_list;

/*
* Set if the prev cmd was ORDERED. Size and, hence, alignment must
* allow unprotected modifications independently to the neighbour fields.
*/
unsigned long prev_cmd_ordered;
int pending_simple_inc_expected_sn;

atomic_t *cur_sn_slot;
atomic_t sn_slots[15];
@@ -1873,11 +1878,19 @@ struct scst_cmd {
*************************************************************/

/*
* Set if expected_sn should be incremented, i.e. cmd was sent
* for execution
* Set if cmd was sent for execution to optimize aborts waiting.
* Also it is a sign under contract that if inc_expected_sn_on_done
* is not set, the thread setting it is committing obligation to
* call scst_inc_expected_sn() after this cmd was sent to exec.
*/
unsigned int sent_for_exec:1;

/* Set if cmd's SN was set */
unsigned int sn_set:1;

/* Set if increment expected_sn in cmd->scst_cmd_done() */
unsigned int inc_expected_sn_on_done:1;

/* Set if the cmd's action is completed */
unsigned int completed:1;

@@ -1955,9 +1968,6 @@ struct scst_cmd {
*/
unsigned int preprocessing_only:1;

/* Set if cmd's SN was set */
unsigned int sn_set:1;

/* Set if hq_cmd_count was incremented */
unsigned int hq_cmd_inced:1;

@@ -1980,9 +1990,6 @@ struct scst_cmd {
/* Set if the cmd was done or aborted out of its SN */
unsigned int out_of_sn:1;

/* Set if increment expected_sn in cmd->scst_cmd_done() */
unsigned int inc_expected_sn_on_done:1;

/* Set if tgt_sn field is valid */
unsigned int tgt_sn_set:1;


@@ -3339,7 +3339,6 @@ static void scst_init_order_data(struct scst_order_data *order_data)
INIT_LIST_HEAD(&order_data->skipped_sn_list);
order_data->curr_sn = (typeof(order_data->curr_sn))(-20);
order_data->expected_sn = order_data->curr_sn;
atomic_set(&order_data->sn_cmd_count, 0);
order_data->cur_sn_slot = &order_data->sn_slots[0];
for (i = 0; i < (int)ARRAY_SIZE(order_data->sn_slots); i++)
atomic_set(&order_data->sn_slots[i], 0);
@@ -5433,24 +5432,15 @@ void scst_free_cmd(struct scst_cmd *cmd)
}

if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
PRINT_ERROR("Finishing not executed cmd %p (opcode "
"%d, target %s, LUN %lld, sn %d, expected_sn %d)",
cmd, cmd->cdb[0], cmd->tgtt->name,
(long long unsigned int)cmd->lun,
cmd->sn, cmd->cur_order_data->expected_sn);
scst_unblock_deferred(cmd->cur_order_data, cmd);
}
#endif

EXTRACHECKS_BUG_ON(!test_bit(SCST_CMD_INC_EXPECTED_SN_PASSED,
&cmd->cmd_flags) && cmd->sn_set && !cmd->out_of_sn);
if (unlikely(cmd->out_of_sn)) {
destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
&cmd->cmd_flags);
TRACE_SN("Out of SN cmd %p (tag %llu, sn %d), "
"destroy=%d", cmd,
(long long unsigned int)cmd->tag,
cmd->sn, destroy);
destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
&cmd->cmd_flags);
}
}

@@ -8025,16 +8015,21 @@ static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
return;
}

/* No locks */
struct scst_cmd *__scst_check_deferred_commands(struct scst_order_data *order_data)
/*
* sn_lock supposed to be locked and IRQs off. Might drop then reacquire
* it inside.
*/
struct scst_cmd *__scst_check_deferred_commands_locked(
struct scst_order_data *order_data, bool return_first)
{
struct scst_cmd *res = NULL, *cmd, *t;
typeof(order_data->expected_sn) expected_sn = order_data->expected_sn;
bool activate = !return_first, first = true, found = false;

spin_lock_irq(&order_data->sn_lock);
TRACE_ENTRY();

if (unlikely(order_data->hq_cmd_count != 0))
goto out_unlock;
goto out;

restart:
list_for_each_entry_safe(cmd, t, &order_data->deferred_cmd_list,
@@ -8042,34 +8037,42 @@ restart:
EXTRACHECKS_BUG_ON((cmd->queue_type != SCST_CMD_QUEUE_SIMPLE) &&
(cmd->queue_type != SCST_CMD_QUEUE_ORDERED));
if (cmd->sn == expected_sn) {
bool stop = (cmd->sn_slot == NULL);

TRACE_SN("Deferred command %p (sn %d, set %d) found",
cmd, cmd->sn, cmd->sn_set);

order_data->def_cmd_count--;
list_del(&cmd->sn_cmd_list_entry);
if (res == NULL) {
res = cmd;
if ((cmd->sn_slot == NULL) && !cmd->done) {
/*
* Then there can be only one command
* with this SN, so there's no point
* to iterate further.
*/
goto out_unlock;
}
} else {

if (activate) {
spin_lock(&cmd->cmd_threads->cmd_list_lock);
TRACE_SN("Adding cmd %p to active cmd list",
cmd);
TRACE_SN("Adding cmd %p to active cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry,
&cmd->cmd_threads->active_cmd_list);
wake_up(&cmd->cmd_threads->cmd_list_waitQ);
spin_unlock(&cmd->cmd_threads->cmd_list_lock);
/* !! At this point cmd can be already dead !! */
}
if (first) {
if (!activate)
res = cmd;
if (stop) {
/*
* Then there can be only one command
* with this SN, so there's no point
* to iterate further.
*/
goto out;
}
first = false;
activate = true;
}
found = true;
}
}
if (res != NULL)
goto out_unlock;
if (found)
goto out;

list_for_each_entry(cmd, &order_data->skipped_sn_list,
sn_cmd_list_entry) {
@@ -8095,19 +8098,42 @@ restart:
}
}

out_unlock:
spin_unlock_irq(&order_data->sn_lock);
out:
TRACE_EXIT_HRES((unsigned long)res);
return res;
}

static void __scst_unblock_deferred(struct scst_order_data *order_data,
/* No locks */
struct scst_cmd *__scst_check_deferred_commands(
struct scst_order_data *order_data, bool return_first)
{
struct scst_cmd *res;

TRACE_ENTRY();

spin_lock_irq(&order_data->sn_lock);
res = __scst_check_deferred_commands_locked(order_data, return_first);
spin_unlock_irq(&order_data->sn_lock);

TRACE_EXIT_HRES((unsigned long)res);
return res;
}

void scst_unblock_deferred(struct scst_order_data *order_data,
struct scst_cmd *out_of_sn_cmd)
{
EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
TRACE_ENTRY();

if (out_of_sn_cmd->sn == order_data->expected_sn)
if (!out_of_sn_cmd->sn_set) {
TRACE_SN("cmd %p without sn", out_of_sn_cmd);
goto out;
}

if (out_of_sn_cmd->sn == order_data->expected_sn) {
TRACE_SN("out of sn cmd %p (expected sn %d)",
out_of_sn_cmd, order_data->expected_sn);
scst_inc_expected_sn(out_of_sn_cmd);
else {
} else {
out_of_sn_cmd->out_of_sn = 1;
spin_lock_irq(&order_data->sn_lock);
order_data->def_cmd_count++;
@@ -8125,21 +8151,6 @@ static void __scst_unblock_deferred(struct scst_order_data *order_data,

scst_make_deferred_commands_active(order_data);

return;
}

void scst_unblock_deferred(struct scst_order_data *order_data,
struct scst_cmd *out_of_sn_cmd)
{
TRACE_ENTRY();

if (!out_of_sn_cmd->sn_set) {
TRACE_SN("cmd %p without sn", out_of_sn_cmd);
goto out;
}

__scst_unblock_deferred(order_data, out_of_sn_cmd);

out:
TRACE_EXIT();
return;
@@ -9435,42 +9446,25 @@ int tm_dbg_is_release(void)
#ifdef CONFIG_SCST_DEBUG_SN
void scst_check_debug_sn(struct scst_cmd *cmd)
{
static DEFINE_SPINLOCK(lock);
static int type;
static int cnt;
unsigned long flags;
int old = cmd->queue_type;

spin_lock_irqsave(&lock, flags);

if (cnt == 0) {
if ((scst_random() % 1000) == 500) {
if ((scst_random() % 3) == 1)
type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
else
type = SCST_CMD_QUEUE_ORDERED;
do {
cnt = scst_random() % 10;
} while (cnt == 0);
} else
goto out_unlock;
/* To simulate from time to time queue flushing */
if (!in_interrupt() && (scst_random() % 120) == 8) {
int t = scst_random() % 1200;
TRACE_SN("Delaying IO on %d ms", t);
msleep(t);
}

cmd->queue_type = type;
cnt--;

if (((scst_random() % 1000) == 750))
if ((scst_random() % 15) == 7)
cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
else if (((scst_random() % 1000) == 751))
else if ((scst_random() % 1000) == 751)
cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
else if (((scst_random() % 1000) == 752))
else if ((scst_random() % 1000) == 752)
cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;

TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
cmd->queue_type, cnt);

out_unlock:
spin_unlock_irqrestore(&lock, flags);
if (old != cmd->queue_type)
TRACE_SN("DbgSN queue type changed for cmd %p from %d to %d",
cmd, old, cmd->queue_type);
return;
}
#endif /* CONFIG_SCST_DEBUG_SN */

@@ -57,6 +57,7 @@ extern unsigned long scst_trace_flag;

#define TRACE_RETRY(args...) TRACE_DBG_FLAG(TRACE_RTRY, args)
#define TRACE_SN(args...) TRACE_DBG_FLAG(TRACE_SCSI_SERIALIZING, args)
#define TRACE_SN_SPECIAL(args...) TRACE_DBG_FLAG(TRACE_SCSI_SERIALIZING|TRACE_SPECIAL, args)

#else /* CONFIG_SCST_DEBUG */

@@ -69,6 +70,7 @@ extern unsigned long scst_trace_flag;

#define TRACE_RETRY(args...)
#define TRACE_SN(args...)
#define TRACE_SN_SPECIAL(args...)

#endif

@@ -252,38 +254,41 @@ extern void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev);

extern struct scst_dev_type scst_null_devtype;

extern struct scst_cmd *__scst_check_deferred_commands_locked(
struct scst_order_data *order_data, bool return_first);
extern struct scst_cmd *__scst_check_deferred_commands(
struct scst_order_data *order_data);
struct scst_order_data *order_data, bool return_first);

/* Used to save the function call on the fast path */
static inline struct scst_cmd *scst_check_deferred_commands(
struct scst_order_data *order_data)
struct scst_order_data *order_data, bool return_first)
{
if (order_data->def_cmd_count == 0)
return NULL;
else
return __scst_check_deferred_commands(order_data);
return __scst_check_deferred_commands(order_data, return_first);
}

static inline void scst_make_deferred_commands_active(
struct scst_order_data *order_data)
{
struct scst_cmd *c;

c = scst_check_deferred_commands(order_data);
if (c != NULL) {
TRACE_SN("Adding cmd %p to active cmd list", c);
spin_lock_irq(&c->cmd_threads->cmd_list_lock);
list_add_tail(&c->cmd_list_entry,
&c->cmd_threads->active_cmd_list);
wake_up(&c->cmd_threads->cmd_list_waitQ);
spin_unlock_irq(&c->cmd_threads->cmd_list_lock);
}

scst_check_deferred_commands(order_data, false);
return;
}

bool scst_inc_expected_sn(struct scst_cmd *cmd);
/*
* sn_lock supposed to be locked and IRQs off. Might drop then reacquire
* it inside.
*/
static inline void scst_make_deferred_commands_active_locked(
struct scst_order_data *order_data)
{
if (order_data->def_cmd_count != 0)
__scst_check_deferred_commands_locked(order_data, false);
return;
}

bool scst_inc_expected_sn(const struct scst_cmd const * const cmd);
int scst_check_hq_cmd(struct scst_cmd *cmd);

void scst_unblock_deferred(struct scst_order_data *order_data,

@@ -2537,8 +2537,17 @@ out_uncomplete:
}
EXPORT_SYMBOL_GPL(__scst_check_local_events);

/* No locks. Returns true, if expected_sn was incremented. */
bool scst_inc_expected_sn(struct scst_cmd *cmd)
/*
* No locks. Returns true, if expected_sn was incremented.
*
* !! At this point cmd can be processed in parallel by some other thread!
* !! As a consequence, no pointer in cmd, except cur_order_data and
* !! sn_slot, can be touched here! The same is for assignments to cmd's
* !! fields. As protection cmd declared as const.
*
* Overall, cmd is passed here only for extra correctness checking.
*/
bool scst_inc_expected_sn(const struct scst_cmd *cmd)
{
bool res = false;
struct scst_order_data *order_data = cmd->cur_order_data;
@@ -2546,27 +2555,17 @@ bool scst_inc_expected_sn(struct scst_cmd *cmd)

TRACE_ENTRY();

/*
* !! At this point any pointer in cmd, except cur_order_data !!
* !! and sn_slot could be already destroyed! !!
*/

EXTRACHECKS_BUG_ON(!cmd->sn_set);

/*
* Optimized for lockless fast path of sequence of SIMPLE or
* ORDERED commands
*/
#ifdef CONFIG_SCST_EXTRACHECKS
sBUG_ON(test_bit(SCST_CMD_INC_EXPECTED_SN_PASSED, &cmd->cmd_flags));
set_bit(SCST_CMD_INC_EXPECTED_SN_PASSED, &((struct scst_cmd *)cmd)->cmd_flags);
#endif

atomic_dec(&order_data->sn_cmd_count);
/* Optimized for lockless fast path of sequence of SIMPLE commands */

if (slot == NULL) {
/* SIMPLE command can have slot NULL as well, if there was no free slots */
EXTRACHECKS_BUG_ON((cmd->queue_type != SCST_CMD_QUEUE_SIMPLE) &&
(cmd->queue_type != SCST_CMD_QUEUE_ORDERED));
smp_mb(); /* sync with scst_inc_cur_sn() */
goto inc;
}
if (slot == NULL)
goto ordered;

TRACE_SN("Slot %zd, value %d", slot - order_data->sn_slots,
atomic_read(slot));
@@ -2574,40 +2573,50 @@ bool scst_inc_expected_sn(struct scst_cmd *cmd)
if (!atomic_dec_and_test(slot))
goto out;

/* atomic_dec_and_test() implies memory barrier to sync with scst_inc_cur_sn() */
/*
* atomic_dec_and_test() implies memory barrier to sync with
* scst_inc_cur_sn() for pending_simple_inc_expected_sn
*/

inc:
if (order_data->expected_sn != order_data->curr_sn) {
bool lock = (atomic_read(&order_data->sn_cmd_count) == 0);
/*
* No protection of expected_sn is needed, because only one
* thread at time can be here (serialized by sn). Lock is to
* sync with curr_sn decrement in scst_inc_cur_sn(). It
* is supposed that there could not be half-incremented halves.
*/
if (lock)
spin_lock_irq(&order_data->sn_lock);
if (likely(order_data->expected_sn != order_data->curr_sn)) {
order_data->expected_sn++;
TRACE_SN("New expected_sn: %d", order_data->expected_sn);
res = true;
/*
* Write must be before def_cmd_count read to be in
* sync with scst_post_exec_sn(). See comment in
* scst_exec_check_sn(). Just in case if lock isn't used
* or spin_unlock() isn't a memory barrier. Although,
* checking of def_cmd_count is far from here, but
* who knows, let's be safer.
*/
smp_mb();
}
if (lock)
spin_unlock_irq(&order_data->sn_lock);
}
if (likely(order_data->pending_simple_inc_expected_sn == 0))
goto out;

spin_lock_irq(&order_data->sn_lock);

if (unlikely(order_data->pending_simple_inc_expected_sn == 0))
goto out_unlock;

order_data->pending_simple_inc_expected_sn--;
TRACE_SN("New dec pending_simple_inc_expected_sn: %d",
order_data->pending_simple_inc_expected_sn);
EXTRACHECKS_BUG_ON(order_data->pending_simple_inc_expected_sn < 0);

inc_expected_sn_locked:
order_data->expected_sn++;
/*
* Write must be before def_cmd_count read to be in
* sync with scst_post_exec_sn(). See comment in
* scst_exec_check_sn(). Just in case if spin_unlock() isn't
* a memory barrier. Although, checking of def_cmd_count
* is far from here, but who knows, let's be safer.
*/
smp_mb();
TRACE_SN("New expected_sn: %d", order_data->expected_sn);
res = true;

out_unlock:
spin_unlock_irq(&order_data->sn_lock);

out:
TRACE_EXIT_RES(res);
return res;

ordered:
/* SIMPLE command can have slot NULL as well, if there were no free slots */
EXTRACHECKS_BUG_ON((cmd->queue_type != SCST_CMD_QUEUE_SIMPLE) &&
(cmd->queue_type != SCST_CMD_QUEUE_ORDERED));
spin_lock_irq(&order_data->sn_lock);
goto inc_expected_sn_locked;
}

/* No locks */
@@ -2628,7 +2637,7 @@ static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
if (make_active)
scst_make_deferred_commands_active(cmd->cur_order_data);
else
res = scst_check_deferred_commands(cmd->cur_order_data);
res = scst_check_deferred_commands(cmd->cur_order_data, true);
}

out:
@@ -2892,6 +2901,17 @@ static int scst_exec_check_blocking(struct scst_cmd **active_cmd)
while (1) {
int rc;

#ifdef CONFIG_SCST_DEBUG_SN
if ((scst_random() % 120) == 7) {
int t = scst_random() % 200;
TRACE_SN("Delaying IO on %d ms", t);
msleep(t);
}
#endif
/*
* After sent_for_exec set, scst_post_exec_sn() must be called
* before exiting this function!
*/
cmd->sent_for_exec = 1;
/*
* To sync with scst_abort_cmd(). The above assignment must
@@ -2980,7 +3000,7 @@ static int scst_exec_check_sn(struct scst_cmd **active_cmd)

sBUG_ON(!cmd->sn_set);

expected_sn = order_data->expected_sn;
expected_sn = ACCESS_ONCE(order_data->expected_sn);
/* Optimized for lockless fast path */
if ((cmd->sn != expected_sn) || (order_data->hq_cmd_count > 0)) {
spin_lock_irq(&order_data->sn_lock);
@@ -3553,12 +3573,14 @@ static int scst_pre_xmit_response(struct scst_cmd *cmd)
#endif
if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
scst_on_hq_cmd_response(cmd);

if (unlikely(!cmd->sent_for_exec)) {
TRACE_SN("cmd %p was not sent for exec (sn %d, set %d)",
cmd, cmd->sn, cmd->sn_set);
else if (unlikely(!cmd->sent_for_exec)) {
/*
* scst_post_exec_sn() can't be called in parallel
* due to the sent_for_exec contract obligation
*/
TRACE_SN("cmd %p was not sent for exec (sn %d, "
"set %d)", cmd, cmd->sn, cmd->sn_set);
scst_unblock_deferred(cmd->cur_order_data, cmd);
cmd->sent_for_exec = 1;
}
}

@@ -3801,23 +3823,21 @@ out:
return res;
}

/* Called only from scst_cmd_set_sn() with the same locking expectations */
static inline void scst_inc_cur_sn(struct scst_order_data *order_data)
/* Must be called under sn_lock with IRQs off */
static inline void scst_inc_expected_sn_idle(struct scst_order_data *order_data)
{
if (atomic_read(&order_data->sn_cmd_count) > 0) {
unsigned long flags;
order_data->curr_sn++;
TRACE_SN("Incremented curr_sn %d", order_data->curr_sn);
smp_mb(); /* sync with scst_inc_expected_sn() */
if (unlikely(atomic_read(&order_data->sn_cmd_count) == 0)) {
spin_lock_irqsave(&order_data->sn_lock, flags);
if (order_data->expected_sn == (order_data->curr_sn-1)) {
order_data->curr_sn--;
TRACE_SN("Decremented curr_sn %d", order_data->curr_sn);
}
spin_unlock_irqrestore(&order_data->sn_lock, flags);
}
}
order_data->expected_sn++;
/*
* Write must be before def_cmd_count read to be in
* sync with scst_post_exec_sn(). See comment in
* scst_exec_check_sn(). Just in case if spin_unlock() isn't
* a memory barrier. Although, checking of def_cmd_count
* is far from here, but who knows, let's be safer.
*/
smp_mb();
TRACE_SN("New expected_sn: %d", order_data->expected_sn);

scst_make_deferred_commands_active_locked(order_data);
return;
}

@@ -3828,9 +3848,7 @@ static inline void scst_inc_cur_sn(struct scst_order_data *order_data)
* number. A command that must be executed after previously received commands
* is assigned a new and higher slot number.
*
* No locks expected, but it must be externally serialized (see comment before
* scst_cmd_init_done() definition) to protect SIMPLE and ORDERED commands
* lockless fast path.
* No locks expected.
*
* Note: This approach in full compliance with SAM may result in the reordering
* of conflicting SIMPLE READ and/or WRITE commands (commands with at least
@@ -3857,24 +3875,26 @@ static void scst_cmd_set_sn(struct scst_cmd *cmd)

EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);

/*
* Optimized for lockless fast path of sequence of SIMPLE or
* ORDERED commands.
*/
/* Optimized for lockless fast path of sequence of SIMPLE commands */

scst_check_debug_sn(cmd);

#ifdef CONFIG_SCST_STRICT_SERIALIZING
cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
if (likely(cmd->queue_type != SCST_CMD_QUEUE_HEAD_OF_QUEUE))
cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
#endif

if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
/*
* Not the best way, but good enough until there is a
* possibility to specify queue type during pass-through
* commands submission.
*/
cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
if (likely(cmd->queue_type != SCST_CMD_QUEUE_HEAD_OF_QUEUE)) {
/*
* Not the best way, but good enough until there is a
* possibility to specify queue type during pass-through
* commands submission.
*/
TRACE_SN("Restricted reorder dev %s (cmd %p)",
cmd->dev->virt_name, cmd);
cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
}
}

again:
@@ -3882,49 +3902,83 @@ again:
case SCST_CMD_QUEUE_SIMPLE:
if (order_data->prev_cmd_ordered) {
if (atomic_read(order_data->cur_sn_slot) != 0) {
atomic_t *old_cur_sn_slot = order_data->cur_sn_slot;
while (1) {
order_data->cur_sn_slot++;
if (order_data->cur_sn_slot == order_data->sn_slots + ARRAY_SIZE(order_data->sn_slots))
order_data->cur_sn_slot = order_data->sn_slots;
if (atomic_read(order_data->cur_sn_slot) == 0)
break;
if (order_data->cur_sn_slot == old_cur_sn_slot) {
static int q;
if (q++ < 10)
PRINT_WARNING("Not enough SN slots");
goto ordered;
}
order_data->cur_sn_slot++;
if (order_data->cur_sn_slot == order_data->sn_slots +
ARRAY_SIZE(order_data->sn_slots))
order_data->cur_sn_slot = order_data->sn_slots;
if (unlikely(atomic_read(order_data->cur_sn_slot) != 0)) {
static int q;
if (q++ < 10)
PRINT_WARNING("Not enough SN slots "
"(dev %s)", cmd->dev->virt_name);
goto ordered;
}
TRACE_SN("New cur SN slot %zd",
order_data->cur_sn_slot - order_data->sn_slots);
}
scst_inc_cur_sn(order_data);

order_data->curr_sn++;
TRACE_SN("Incremented curr_sn %d", order_data->curr_sn);

order_data->prev_cmd_ordered = 0;
/*
* expected_sn will be/was incremented by the
* previous ORDERED cmd
*/
}

atomic_inc(order_data->cur_sn_slot);

cmd->sn_slot = order_data->cur_sn_slot;
atomic_inc(cmd->sn_slot);
cmd->sn = order_data->curr_sn;
cmd->sn_set = 1;

atomic_inc(&order_data->sn_cmd_count);
TRACE_SN("New sn_cmd_count: %d", atomic_read(&order_data->sn_cmd_count));
break;

case SCST_CMD_QUEUE_UNTAGGED: /* put here with goto for better SIMPLE fast path */
/* It is processed further as SIMPLE */
cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
goto again;

case SCST_CMD_QUEUE_ORDERED:
TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
ordered:
order_data->prev_cmd_ordered = 1;
order_data->curr_sn++;
TRACE_SN("Incremented curr_sn %d", order_data->curr_sn);

scst_inc_cur_sn(order_data);
if (order_data->prev_cmd_ordered) {
TRACE_SN("Prev cmd ordered set");
/*
* expected_sn will be/was incremented by the
* previous ORDERED cmd
*/
} else {
order_data->prev_cmd_ordered = 1;

spin_lock_irqsave(&order_data->sn_lock, flags);

/*
* If no commands are going to reach
* scst_inc_expected_sn(), inc expected_sn here.
*/
if (atomic_read(order_data->cur_sn_slot) == 0)
scst_inc_expected_sn_idle(order_data);
else {
order_data->pending_simple_inc_expected_sn++;
TRACE_SN("New inc pending_simple_inc_expected_sn: %d",
order_data->pending_simple_inc_expected_sn);
smp_mb(); /* to sync with scst_inc_expected_sn() */
if (unlikely(atomic_read(order_data->cur_sn_slot) == 0)) {
order_data->pending_simple_inc_expected_sn--;
TRACE_SN("New dec pending_simple_inc_expected_sn: %d",
order_data->pending_simple_inc_expected_sn);
EXTRACHECKS_BUG_ON(order_data->pending_simple_inc_expected_sn < 0);
scst_inc_expected_sn_idle(order_data);
}
}
spin_unlock_irqrestore(&order_data->sn_lock, flags);
}

cmd->sn = order_data->curr_sn;
cmd->sn_set = 1;

atomic_inc(&order_data->sn_cmd_count);
TRACE_SN("New sn_cmd_count: %d", atomic_read(&order_data->sn_cmd_count));
break;

case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
@@ -3935,19 +3989,14 @@ ordered:
cmd->hq_cmd_inced = 1;
goto out;

case SCST_CMD_QUEUE_UNTAGGED: /* put here with goto for better fast path */
/* It is processed as SIMPLE */
cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
goto again;

default:
sBUG();
}

TRACE_SN("cmd(%p)->sn: %d (order_data %p, *cur_sn_slot %d, "
"sn_cmd_count %d, prev_cmd_ordered %ld, cur_sn_slot %zd)", cmd,
"prev_cmd_ordered %d, cur_sn_slot %zd)", cmd,
cmd->sn, order_data, atomic_read(order_data->cur_sn_slot),
atomic_read(&order_data->sn_cmd_count), order_data->prev_cmd_ordered,
order_data->prev_cmd_ordered,
order_data->cur_sn_slot - order_data->sn_slots);

out:
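Editor's footnote on the scst_priv.h hunk above: the patch splits the deferred-command check into a _locked worker plus an unlocked wrapper, with a fast-path inline that skips the call entirely when nothing is deferred. The self-contained sketch below mirrors only that layering with pthreads; it is an illustration, not SCST code, and every toy_* name is invented.

/* Toy illustration of the helper layering: fast-path inline, unlocked
 * entry point that takes the lock, and a _locked worker for callers
 * that already hold it. */
#include <pthread.h>
#include <stdio.h>

struct toy_order_data {
	pthread_mutex_t sn_lock;
	int def_cmd_count;              /* number of deferred commands */
};

/* Caller must hold sn_lock. */
static void toy_check_deferred_locked(struct toy_order_data *od, int return_first)
{
	printf("checking %d deferred cmd(s), return_first=%d\n",
	       od->def_cmd_count, return_first);
}

/* No locks held by the caller. */
static void toy_check_deferred(struct toy_order_data *od, int return_first)
{
	pthread_mutex_lock(&od->sn_lock);
	toy_check_deferred_locked(od, return_first);
	pthread_mutex_unlock(&od->sn_lock);
}

/* Fast path: avoid the function call when nothing is deferred. */
static inline void toy_make_deferred_active(struct toy_order_data *od)
{
	if (od->def_cmd_count != 0)
		toy_check_deferred(od, 0);
}

int main(void)
{
	struct toy_order_data od = {
		.sn_lock = PTHREAD_MUTEX_INITIALIZER,
		.def_cmd_count = 2,
	};

	toy_make_deferred_active(&od);
	return 0;
}

The real functions additionally return a command to the caller (return_first), walk the deferred and skipped-SN lists, and may drop and reacquire sn_lock, which the toy omits.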