scst_sysfs: Do not suspend I/O for LUN management

Protect modifications of sess_tgt_dev_list with the new mutex
tgt_dev_list_mutex. Protect read-only accesses of this list via
RCU. No longer take scst_mutex when invoking any of the
following functions:
* scst_queue_report_luns_changed_UA().
* scst_report_luns_changed_sess().
* scst_lookup_tgt_dev() when invoked outside of command context.
* scst_nexus_loss().
* scst_do_nexus_loss_sess().
* scst_abort_all_nexus_loss_sess().
* scst_do_nexus_loss_tgt().

That change makes it safe to remove a LUN from sess_tgt_dev_list
while I/O is in progress. Hence allow LUN management without
suspending I/O. When removing a LUN, the LUN visibility changes
immediately, without suspending I/O, but the corresponding
scst_tgt_dev structures are only freed after an RCU grace period.



git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@7866 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Bart Van Assche
2019-01-05 22:32:45 +00:00
parent 941e324b99
commit 3e64094b0c
7 changed files with 307 additions and 184 deletions

View File

@@ -1967,10 +1967,14 @@ struct scst_session {
/* session's async flags */
unsigned long sess_aflags;
/* protects sess_tgt_dev_list[] modifications */
struct mutex tgt_dev_list_mutex;
/*
* Hash list for tgt_dev's for this session with size and fn. It isn't
* hlist_entry, because we need ability to go over the list in the
* reverse order. Protected by scst_mutex and suspended activity.
* Hash list for tgt_dev's for this session with size and fn. Reading
* is allowed either when holding an RCU read lock or when holding
* tgt_dev_list_mutex. Modifying is only allowed when holding
* tgt_dev_list_mutex.
*/
#define SESS_TGT_DEV_LIST_HASH_SIZE (1 << 5)
#define SESS_TGT_DEV_LIST_HASH_FN(val) ((val) & (SESS_TGT_DEV_LIST_HASH_SIZE - 1))

View File

@@ -2504,15 +2504,14 @@ static bool scst_cm_is_lun_free(unsigned int lun)
TRACE_ENTRY();
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
rcu_read_lock();
list_for_each_entry_rcu(tgt_dev, head, sess_tgt_dev_list_entry) {
if (tgt_dev->lun == lun) {
res = false;
break;
}
}
rcu_read_unlock();
TRACE_EXIT_RES(res);
return res;
@@ -2526,22 +2525,23 @@ static unsigned int scst_cm_get_lun(const struct scst_device *dev)
TRACE_ENTRY();
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &scst_cm_sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (tgt_dev->dev == dev) {
res = tgt_dev->lun;
rcu_read_unlock();
TRACE_DBG("LUN %d found (full LUN %lld)",
res, tgt_dev->lun);
goto out;
}
}
}
rcu_read_unlock();
out:
TRACE_EXIT_RES(res);
@@ -2562,12 +2562,15 @@ static int scst_cm_dev_register(struct scst_device *dev, uint64_t lun)
TRACE_DBG("dev %s, LUN %ld", dev->virt_name, (unsigned long)lun);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct scst_tgt_dev *tgt_dev;
struct list_head *head = &scst_cm_sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (tgt_dev->dev == dev) {
rcu_read_unlock();
/*
* It's OK, because the copy manager could
* auto register some devices
@@ -2579,6 +2582,7 @@ static int scst_cm_dev_register(struct scst_device *dev, uint64_t lun)
}
}
}
rcu_read_unlock();
if (lun == SCST_MAX_LUN) {
add_lun = true;
@@ -2629,10 +2633,10 @@ static void scst_cm_dev_unregister(struct scst_device *dev, bool del_lun)
{
int i;
struct scst_cm_desig *des, *t;
u32 lun = SCST_MAX_LUN;
TRACE_ENTRY();
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
TRACE_DBG("dev %s, del_lun %d", dev->virt_name, del_lun);
@@ -2648,18 +2652,23 @@ static void scst_cm_dev_unregister(struct scst_device *dev, bool del_lun)
if (!del_lun)
goto out;
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct scst_tgt_dev *tgt_dev;
struct list_head *head = &scst_cm_sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (tgt_dev->dev == dev) {
scst_acg_del_lun(scst_cm_tgt->default_acg,
tgt_dev->lun, false);
lun = tgt_dev->lun;
break;
}
}
}
rcu_read_unlock();
if (lun != SCST_MAX_LUN)
scst_acg_del_lun(scst_cm_tgt->default_acg, lun, false);
out:
TRACE_EXIT();
@@ -2804,7 +2813,6 @@ bool scst_cm_on_del_lun(struct scst_acg_dev *acg_dev, bool gen_report_luns_chang
TRACE_ENTRY();
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
if (acg_dev->acg != scst_cm_tgt->default_acg)

View File

@@ -1671,6 +1671,7 @@ static const struct scst_sdbops scst_scsi_op_table[] = {
#define SCST_CDB_TBL_SIZE ((int)ARRAY_SIZE(scst_scsi_op_table))
static void scst_del_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
uint8_t *sense, int sense_len);
@@ -2440,14 +2441,13 @@ void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
asc, ascq);
/* To protect sess_tgt_dev_list */
mutex_lock(&scst_mutex);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
spin_lock_bh(&tgt_dev->tgt_dev_lock);
if (!list_empty(&tgt_dev->UA_list)) {
struct scst_tgt_dev_UA *ua;
@@ -2472,8 +2472,7 @@ void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
spin_unlock_bh(&tgt_dev->tgt_dev_lock);
}
}
mutex_unlock(&scst_mutex);
rcu_read_unlock();
TRACE_EXIT();
return;
@@ -2517,7 +2516,23 @@ void scst_free_aen(struct scst_aen *aen)
return;
}
/* Must be called under scst_mutex */
#ifdef CONFIG_SCST_EXTRACHECKS
/*
 * Report whether @tgt_dev is still reachable through its session's
 * sess_tgt_dev_list hash, i.e. whether it has not yet been removed by
 * scst_del_tgt_dev(). Debug-only (CONFIG_SCST_EXTRACHECKS) helper used
 * to verify locking assumptions around tgt_dev teardown.
 *
 * Takes the RCU read lock because scst_lookup_tgt_dev() must be called
 * either under RCU or with sess->tgt_dev_list_mutex held.
 */
static bool scst_is_active_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
bool is_active;
rcu_read_lock();
is_active = scst_lookup_tgt_dev(tgt_dev->sess, tgt_dev->lun) == tgt_dev;
rcu_read_unlock();
return is_active;
}
#endif
/*
* The caller must ensure that tgt_dev does not disappear while this function
* is in progress.
*/
void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
int key, int asc, int ascq)
{
@@ -2623,7 +2638,6 @@ static inline bool scst_is_report_luns_changed_type(int type)
}
}
/* scst_mutex supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
int flags)
{
@@ -2639,11 +2653,13 @@ static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
local_bh_disable();
rcu_read_lock();
#if !defined(__CHECKER__)
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
/* Lockdep triggers here a false positive.. */
spin_lock_nolockdep(&tgt_dev->tgt_dev_lock);
@@ -2654,7 +2670,8 @@ static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
int sl;
if (!scst_is_report_luns_changed_type(
@@ -2674,20 +2691,21 @@ static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
for (i = SESS_TGT_DEV_LIST_HASH_SIZE-1; i >= 0; i--) {
head = &sess->sess_tgt_dev_list[i];
list_for_each_entry_reverse(tgt_dev, head,
sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
spin_unlock_nolockdep(&tgt_dev->tgt_dev_lock);
}
}
#endif
rcu_read_unlock();
local_bh_enable();
TRACE_EXIT();
return;
}
/* The activity supposed to be suspended and scst_mutex held */
static void scst_report_luns_changed_sess(struct scst_session *sess)
{
int i;
@@ -2703,13 +2721,14 @@ static void scst_report_luns_changed_sess(struct scst_session *sess)
TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head;
struct scst_tgt_dev *tgt_dev;
head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (scst_is_report_luns_changed_type(
tgt_dev->dev->type)) {
@@ -2721,6 +2740,8 @@ static void scst_report_luns_changed_sess(struct scst_session *sess)
}
found:
rcu_read_unlock();
if (tgtt->report_aen != NULL) {
struct scst_aen *aen;
int rc;
@@ -2760,7 +2781,6 @@ void scst_report_luns_changed(struct scst_acg *acg)
TRACE_ENTRY();
/* To protect acg_sess_list */
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
TRACE_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
@@ -2800,10 +2820,8 @@ void scst_aen_done(struct scst_aen *aen)
if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
scst_sense_reported_luns_data_changed))) {
mutex_lock(&scst_mutex);
scst_queue_report_luns_changed_UA(aen->sess,
SCST_SET_UA_FLAG_AT_HEAD);
mutex_unlock(&scst_mutex);
} else {
struct scst_session *sess = aen->sess;
struct scst_tgt_dev *tgt_dev;
@@ -2811,8 +2829,7 @@ void scst_aen_done(struct scst_aen *aen)
lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
mutex_lock(&scst_mutex);
rcu_read_lock();
/* tgt_dev may have been removed meanwhile, so look it up again */
tgt_dev = scst_lookup_tgt_dev(sess, lun);
if (tgt_dev) {
@@ -2822,8 +2839,7 @@ void scst_aen_done(struct scst_aen *aen)
aen->aen_sense_len,
SCST_SET_UA_FLAG_AT_HEAD);
}
mutex_unlock(&scst_mutex);
rcu_read_unlock();
}
out_free:
@@ -2847,10 +2863,8 @@ void scst_requeue_ua(struct scst_cmd *cmd, const uint8_t *buf, int size)
SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
"for delivery failed cmd %p", cmd);
mutex_lock(&scst_mutex);
scst_queue_report_luns_changed_UA(cmd->sess,
SCST_SET_UA_FLAG_AT_HEAD);
mutex_unlock(&scst_mutex);
} else {
TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
scst_check_set_UA(cmd->tgt_dev, buf, size, SCST_SET_UA_FLAG_AT_HEAD);
@@ -2921,10 +2935,11 @@ retry_add:
list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
bool inq_changed_ua_needed = false;
mutex_lock(&sess->tgt_dev_list_mutex);
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if ((tgt_dev->dev == acg_dev->dev) &&
(tgt_dev->lun == acg_dev->lun) &&
@@ -2934,16 +2949,20 @@ retry_add:
sess, tgt_dev,
(unsigned long long)tgt_dev->lun);
tgt_dev->acg_dev = acg_dev;
mutex_unlock(&sess->tgt_dev_list_mutex);
goto next;
} else if (tgt_dev->lun == acg_dev->lun) {
TRACE_MGMT_DBG("Replacing LUN %lld",
(long long)tgt_dev->lun);
scst_del_tgt_dev(tgt_dev);
scst_free_tgt_dev(tgt_dev);
inq_changed_ua_needed = 1;
break;
}
}
}
mutex_unlock(&sess->tgt_dev_list_mutex);
luns_changed = true;
@@ -2964,6 +2983,8 @@ next:
}
something_freed = false;
mutex_lock(&sess->tgt_dev_list_mutex);
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct scst_tgt_dev *t;
@@ -2978,10 +2999,12 @@ next:
(unsigned long long)tgt_dev->lun);
luns_changed = true;
something_freed = true;
scst_del_tgt_dev(tgt_dev);
scst_free_tgt_dev(tgt_dev);
}
}
}
mutex_unlock(&sess->tgt_dev_list_mutex);
if (add_failed && something_freed) {
TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
@@ -3004,10 +3027,11 @@ next:
if (luns_changed) {
scst_report_luns_changed_sess(sess);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (tgt_dev->inq_changed_ua_needed) {
TRACE_MGMT_DBG("sess %p: Setting "
@@ -3021,6 +3045,7 @@ next:
}
}
}
rcu_read_unlock();
}
out:
@@ -4304,11 +4329,17 @@ out:
* The activity supposed to be suspended and scst_mutex held or the
* corresponding target supposed to be stopped.
*/
static void scst_del_acg_dev(struct scst_acg_dev *acg_dev, bool del_sysfs)
static void scst_del_acg_dev(struct scst_acg_dev *acg_dev,
bool del_acg_dev_list, bool del_sysfs)
{
TRACE_DBG("Removing acg_dev %p from dev_acg_dev_list", acg_dev);
list_del(&acg_dev->dev_acg_dev_list_entry);
if (del_acg_dev_list) {
TRACE_DBG("Removing acg_dev %p from acg_dev_list", acg_dev);
list_del(&acg_dev->acg_dev_list_entry);
}
if (del_sysfs)
scst_acg_dev_sysfs_del(acg_dev);
}
@@ -4329,9 +4360,7 @@ static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
static void scst_del_free_acg_dev(struct scst_acg_dev *acg_dev, bool del_sysfs)
{
TRACE_ENTRY();
TRACE_DBG("Removing acg_dev %p from acg_dev_list", acg_dev);
list_del(&acg_dev->acg_dev_list_entry);
scst_del_acg_dev(acg_dev, del_sysfs);
scst_del_acg_dev(acg_dev, true, del_sysfs);
scst_free_acg_dev(acg_dev);
TRACE_EXIT();
return;
@@ -4497,14 +4526,17 @@ out_free:
/* Delete a LUN without generating a unit attention. */
static struct scst_acg_dev *__scst_acg_del_lun(struct scst_acg *acg,
uint64_t lun,
struct list_head *tgt_dev_list,
bool *report_luns_changed)
{
struct scst_acg_dev *acg_dev = NULL, *a;
struct scst_tgt_dev *tgt_dev, *tt;
struct scst_session *sess;
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
INIT_LIST_HEAD(tgt_dev_list);
list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
if (a->lun == lun) {
acg_dev = a;
@@ -4519,11 +4551,19 @@ static struct scst_acg_dev *__scst_acg_del_lun(struct scst_acg *acg,
list_for_each_entry_safe(tgt_dev, tt, &acg_dev->dev->dev_tgt_dev_list,
dev_tgt_dev_list_entry) {
if (tgt_dev->acg_dev == acg_dev)
scst_free_tgt_dev(tgt_dev);
if (tgt_dev->acg_dev == acg_dev) {
sess = tgt_dev->sess;
mutex_lock(&sess->tgt_dev_list_mutex);
scst_del_tgt_dev(tgt_dev);
mutex_unlock(&sess->tgt_dev_list_mutex);
list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
tgt_dev_list);
}
}
scst_del_free_acg_dev(acg_dev, true);
scst_del_acg_dev(acg_dev, true, true);
PRINT_INFO("Removed LUN %lld from group %s (target %s)",
lun, acg->acg_name, acg->tgt ? acg->tgt->tgt_name : "?");
@@ -4532,22 +4572,37 @@ out:
return acg_dev;
}
/*
* Delete a LUN and generate a unit attention if gen_report_luns_changed is
* true.
*/
static int scst_tgt_devs_cmds(struct list_head *tgt_dev_list)
{
struct scst_tgt_dev *tgt_dev;
int res = 0;
list_for_each_entry(tgt_dev, tgt_dev_list, extra_tgt_dev_list_entry)
res += atomic_read(&tgt_dev->tgt_dev_cmd_count);
return res;
}
/*
 * Wait until no SCSI commands are outstanding for any tgt_dev on
 * @tgt_dev_list, polling tgt_dev_cmd_count every 100 ms.
 *
 * Called from process context (after scst_mutex has been dropped), so
 * sleep with msleep() instead of busy-waiting with mdelay(): draining
 * outstanding commands can take a long time and mdelay() would spin the
 * CPU for the whole wait (see Documentation/timers/timers-howto).
 */
static void scst_wait_for_tgt_devs(struct list_head *tgt_dev_list)
{
while (scst_tgt_devs_cmds(tgt_dev_list) > 0)
msleep(100);
}
int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
bool gen_report_luns_changed)
{
int res = 0;
struct scst_acg_dev *acg_dev;
struct scst_tgt_dev *tgt_dev, *tt;
struct list_head tgt_dev_list;
TRACE_ENTRY();
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
acg_dev = __scst_acg_del_lun(acg, lun, &gen_report_luns_changed);
acg_dev = __scst_acg_del_lun(acg, lun, &tgt_dev_list,
&gen_report_luns_changed);
if (acg_dev == NULL) {
PRINT_ERROR("Device is not found in group %s", acg->acg_name);
res = -EINVAL;
@@ -4557,6 +4612,18 @@ int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
if (gen_report_luns_changed)
scst_report_luns_changed(acg);
mutex_unlock(&scst_mutex);
scst_wait_for_tgt_devs(&tgt_dev_list);
mutex_lock(&scst_mutex);
list_for_each_entry_safe(tgt_dev, tt, &tgt_dev_list,
extra_tgt_dev_list_entry) {
scst_free_tgt_dev(tgt_dev);
}
scst_free_acg_dev(acg_dev);
out:
TRACE_EXIT_RES(res);
return res;
@@ -4569,12 +4636,13 @@ int scst_acg_repl_lun(struct scst_acg *acg, struct kobject *parent,
{
struct scst_acg_dev *acg_dev;
bool del_gen_ua = false;
struct scst_tgt_dev *tgt_dev, *tt;
struct list_head tgt_dev_list;
int res = -EINVAL;
scst_assert_activity_suspended();
lockdep_assert_held(&scst_mutex);
acg_dev = __scst_acg_del_lun(acg, lun, &del_gen_ua);
acg_dev = __scst_acg_del_lun(acg, lun, &tgt_dev_list, &del_gen_ua);
if (!acg_dev)
flags |= SCST_ADD_LUN_GEN_UA;
res = scst_acg_add_lun(acg, parent, dev, lun, flags, NULL);
@@ -4582,8 +4650,6 @@ int scst_acg_repl_lun(struct scst_acg *acg, struct kobject *parent,
goto out;
if (acg_dev && (flags & SCST_REPL_LUN_GEN_UA)) {
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
dev_tgt_dev_list_entry) {
if (tgt_dev->acg_dev->acg == acg &&
@@ -4591,10 +4657,20 @@ int scst_acg_repl_lun(struct scst_acg *acg, struct kobject *parent,
TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
" on tgt_dev %p", tgt_dev);
scst_gen_aen_or_ua(tgt_dev,
SCST_LOAD_SENSE(scst_sense_inquiry_data_changed));
SCST_LOAD_SENSE(scst_sense_inquiry_data_changed));
}
}
}
mutex_unlock(&scst_mutex);
scst_wait_for_tgt_devs(&tgt_dev_list);
mutex_lock(&scst_mutex);
list_for_each_entry_safe(tgt_dev, tt, &tgt_dev_list,
extra_tgt_dev_list_entry) {
scst_free_tgt_dev(tgt_dev);
}
scst_free_acg_dev(acg_dev);
out:
return res;
@@ -4701,7 +4777,7 @@ static void scst_del_acg(struct scst_acg *acg)
list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
acg_dev_list_entry)
scst_del_acg_dev(acg_dev, true);
scst_del_acg_dev(acg_dev, false, true);
list_for_each_entry(acn, &acg->acn_list, acn_list_entry)
scst_acn_sysfs_del(acn);
@@ -4729,6 +4805,7 @@ static void scst_free_acg(struct scst_acg *acg)
{
struct scst_acg_dev *acg_dev, *acg_dev_tmp;
struct scst_acn *acn, *acnt;
struct scst_session *sess;
struct scst_tgt *tgt = acg->tgt;
/* For procfs acg->tgt could be NULL */
@@ -4741,8 +4818,15 @@ static void scst_free_acg(struct scst_acg *acg)
list_for_each_entry_safe(tgt_dev, tt,
&acg_dev->dev->dev_tgt_dev_list,
dev_tgt_dev_list_entry) {
if (tgt_dev->acg_dev == acg_dev)
if (tgt_dev->acg_dev == acg_dev) {
sess = tgt_dev->sess;
mutex_lock(&sess->tgt_dev_list_mutex);
scst_del_tgt_dev(tgt_dev);
mutex_unlock(&sess->tgt_dev_list_mutex);
scst_free_tgt_dev(tgt_dev);
}
}
scst_free_acg_dev(acg_dev);
}
@@ -5241,6 +5325,7 @@ static int scst_alloc_add_tgt_dev(struct scst_session *sess,
goto out;
}
INIT_LIST_HEAD(&tgt_dev->sess_tgt_dev_list_entry);
tgt_dev->dev = dev;
tgt_dev->lun = acg_dev->lun;
tgt_dev->acg_dev = acg_dev;
@@ -5369,8 +5454,10 @@ static int scst_alloc_add_tgt_dev(struct scst_session *sess,
list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
spin_unlock_bh(&dev->dev_lock);
mutex_lock(&sess->tgt_dev_list_mutex);
head = &sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(tgt_dev->lun)];
list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, head);
list_add_tail_rcu(&tgt_dev->sess_tgt_dev_list_entry, head);
mutex_unlock(&sess->tgt_dev_list_mutex);
scst_tg_init_tgt_dev(tgt_dev);
@@ -5405,7 +5492,10 @@ out_free_ua:
goto out;
}
/* scst_mutex supposed to be held */
/*
* The caller must ensure that tgt_dev does not disappear while this function
* is in progress.
*/
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
{
TRACE_ENTRY();
@@ -5423,10 +5513,26 @@ void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
return;
}
/*
* scst_mutex supposed to be held, there must not be parallel activity in this
* session.
*/
static void scst_del_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
struct scst_device *dev = tgt_dev->dev;
lockdep_assert_held(&scst_mutex);
#ifdef CONFIG_SCST_EXTRACHECKS
if (scst_is_active_tgt_dev(tgt_dev))
lockdep_assert_held(&tgt_dev->sess->tgt_dev_list_mutex);
#endif
spin_lock_bh(&dev->dev_lock);
list_del(&tgt_dev->dev_tgt_dev_list_entry);
spin_unlock_bh(&dev->dev_lock);
list_del_rcu(&tgt_dev->sess_tgt_dev_list_entry);
scst_tgt_dev_sysfs_del(tgt_dev);
}
/* The caller must ensure that tgt_dev is not on sess_tgt_dev_list */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
@@ -5434,13 +5540,12 @@ static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
TRACE_ENTRY();
spin_lock_bh(&dev->dev_lock);
list_del(&tgt_dev->dev_tgt_dev_list_entry);
spin_unlock_bh(&dev->dev_lock);
#ifdef CONFIG_SCST_EXTRACHECKS
WARN_ON_ONCE(scst_is_active_tgt_dev(tgt_dev));
#endif
WARN_ON_ONCE(atomic_read(&tgt_dev->tgt_dev_cmd_count) != 0);
list_del(&tgt_dev->sess_tgt_dev_list_entry);
scst_tgt_dev_sysfs_del(tgt_dev);
synchronize_rcu();
if (tgtt->get_initiator_port_transport_id == NULL)
dev->not_pr_supporting_tgt_devs_num--;
@@ -5491,10 +5596,6 @@ out_free:
goto out;
}
/*
* scst_mutex supposed to be held, there must not be parallel activity in this
* session.
*/
void scst_sess_free_tgt_devs(struct scst_session *sess)
{
int i;
@@ -5502,16 +5603,18 @@ void scst_sess_free_tgt_devs(struct scst_session *sess)
TRACE_ENTRY();
/* The session is going down, no users, so no locks */
mutex_lock(&sess->tgt_dev_list_mutex);
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
list_for_each_entry_safe(tgt_dev, t, head,
sess_tgt_dev_list_entry) {
scst_del_tgt_dev(tgt_dev);
scst_free_tgt_dev(tgt_dev);
}
INIT_LIST_HEAD(head);
}
mutex_unlock(&sess->tgt_dev_list_mutex);
TRACE_EXIT();
return;
@@ -6948,6 +7051,7 @@ struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
sess->init_phase = SCST_SESS_IPH_INITING;
sess->shut_phase = SCST_SESS_SPH_READY;
atomic_set(&sess->refcnt, 0);
mutex_init(&sess->tgt_dev_list_mutex);
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
@@ -8400,8 +8504,14 @@ static void scsi_end_async(struct request *req, blk_status_t error)
#endif
{
struct scsi_io_context *sioc = req->end_io_data;
int result;
TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
TRACE_DBG("sioc %p, cmd %p, error %d / %d", sioc, sioc->data, error,
req->errors);
#else
TRACE_DBG("sioc %p, cmd %p, error %d", sioc, sioc->data, error);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
lockdep_assert_held(req->q->queue_lock);
@@ -12790,19 +12900,14 @@ again:
#if !defined(__CHECKER__)
spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
/*
* cmd won't allow to suspend activities, so we can access
* sess->sess_tgt_dev_list without any additional
* protection.
*/
local_bh_disable();
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
/* Lockdep triggers here a false positive.. */
spin_lock_nolockdep(&tgt_dev->tgt_dev_lock);
@@ -12841,7 +12946,7 @@ again:
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
struct scst_tgt_dev_UA *ua;
@@ -12877,11 +12982,12 @@ out_unlock:
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry_reverse(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
spin_unlock_nolockdep(&tgt_dev->tgt_dev_lock);
}
}
rcu_read_unlock();
local_bh_enable();
spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
@@ -12955,6 +13061,8 @@ static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
TRACE_ENTRY();
lockdep_assert_held(&tgt_dev->tgt_dev_lock);
list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
UA_list_entry) {
if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, len) == 0) {

View File

@@ -283,14 +283,14 @@ static int __sgv_shrink(int nr, int min_interval, int *out_freed)
while (prev_nr > nr && nr > 0) {
prev_nr = nr;
mutex_lock(&sgv_pools_mutex);
list_for_each_entry(pool, &sgv_pools_list,
sgv_pools_list_entry) {
rcu_read_lock();
list_for_each_entry_rcu(pool, &sgv_pools_list,
sgv_pools_list_entry) {
if (pool->cached_entries)
nr = sgv_shrink_pool(pool, nr, min_interval,
cur_time, out_freed);
}
mutex_unlock(&sgv_pools_mutex);
rcu_read_unlock();
}
TRACE_EXIT_RES(nr);
@@ -1483,6 +1483,8 @@ out_del:
spin_lock_bh(&sgv_pools_lock);
list_del(&pool->sgv_pools_list_entry);
spin_unlock_bh(&sgv_pools_lock);
synchronize_rcu();
#endif
out_free:
@@ -1561,6 +1563,8 @@ static void sgv_pool_destroy(struct sgv_pool *pool)
spin_unlock_bh(&sgv_pools_lock);
mutex_unlock(&sgv_pools_mutex);
synchronize_rcu();
#ifndef CONFIG_SCST_PROC
scst_sgv_sysfs_del(pool);
#endif

View File

@@ -2075,16 +2075,19 @@ static int scst_sessions_info_show(struct seq_file *seq, void *v)
acg_sess_list_entry) {
int active_cmds = 0, t;
rcu_read_lock();
for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
struct list_head *head =
&sess->sess_tgt_dev_list[t];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
}
}
rcu_read_unlock();
seq_printf(seq, "%-20s %-45s %-35s %d/%d\n",
sess->tgt->tgtt->name,
sess->initiator_name,

View File

@@ -1380,13 +1380,9 @@ static int __scst_process_luns_mgmt_store(char *buffer,
goto out;
}
res = scst_suspend_activity(SCST_SUSPEND_TIMEOUT_USER);
if (res != 0)
goto out;
res = mutex_lock_interruptible(&scst_mutex);
if (res != 0)
goto out_resume;
goto out;
/* Check if tgt and acg not already freed while we were coming here */
if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
@@ -1503,9 +1499,6 @@ static int __scst_process_luns_mgmt_store(char *buffer,
out_unlock:
mutex_unlock(&scst_mutex);
out_resume:
scst_resume_activity();
out:
TRACE_EXIT_RES(res);
return res;
@@ -1901,17 +1894,20 @@ static ssize_t __scst_acg_black_hole_store(struct scst_acg *acg,
list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
int i;
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (t != SCST_ACG_BLACK_HOLE_NONE)
set_bit(SCST_TGT_DEV_BLACK_HOLE, &tgt_dev->tgt_dev_flags);
else
clear_bit(SCST_TGT_DEV_BLACK_HOLE, &tgt_dev->tgt_dev_flags);
}
}
rcu_read_unlock();
}
PRINT_INFO("Black hole set to %d for ACG %s", t, acg->acg_name);
@@ -2001,11 +1997,12 @@ static int __scst_acg_process_cpu_mask_store(struct scst_tgt *tgt,
list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
int i;
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct scst_tgt_dev *tgt_dev;
struct list_head *head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
int rc;
@@ -2017,6 +2014,8 @@ static int __scst_acg_process_cpu_mask_store(struct scst_tgt *tgt,
" failed: %d", rc);
}
}
rcu_read_unlock();
if (tgt->tgtt->report_aen != NULL) {
struct scst_aen *aen;
int rc;
@@ -4447,24 +4446,20 @@ static int scst_sysfs_sess_get_active_commands(struct scst_session *sess)
TRACE_ENTRY();
res = mutex_lock_interruptible(&scst_mutex);
if (res != 0)
goto out_put;
rcu_read_lock();
for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
struct list_head *head = &sess->sess_tgt_dev_list[t];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
}
}
mutex_unlock(&scst_mutex);
rcu_read_unlock();
res = active_cmds;
out_put:
kobject_put(&sess->sess_kobj);
TRACE_EXIT_RES(res);
@@ -4517,15 +4512,12 @@ static int scst_sysfs_sess_get_dif_checks_failed_work_fn(struct scst_sysfs_work_
TRACE_ENTRY();
res = mutex_lock_interruptible(&scst_mutex);
if (res != 0)
goto out_put;
rcu_read_lock();
for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
struct list_head *head = &sess->sess_tgt_dev_list[t];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head, sess_tgt_dev_list_entry) {
app_failed_tgt += atomic_read(&tgt_dev->tgt_dev_dif_app_failed_tgt);
ref_failed_tgt += atomic_read(&tgt_dev->tgt_dev_dif_ref_failed_tgt);
guard_failed_tgt += atomic_read(&tgt_dev->tgt_dev_dif_guard_failed_tgt);
@@ -4537,8 +4529,7 @@ static int scst_sysfs_sess_get_dif_checks_failed_work_fn(struct scst_sysfs_work_
guard_failed_dev += atomic_read(&tgt_dev->tgt_dev_dif_guard_failed_dev);
}
}
mutex_unlock(&scst_mutex);
rcu_read_unlock();
work->res_buf = kasprintf(GFP_KERNEL, "\tapp\tref\tguard\n"
"tgt\t%d\t%d\t%d\nscst\t%d\t%d\t%d\ndev\t%d\t%d\t%d\n",
@@ -4547,7 +4538,6 @@ static int scst_sysfs_sess_get_dif_checks_failed_work_fn(struct scst_sysfs_work_
app_failed_dev, ref_failed_dev, guard_failed_dev);
res = work->res_buf ? 0 : -ENOMEM;
out_put:
kobject_put(&sess->sess_kobj);
TRACE_EXIT_RES(res);
@@ -4598,15 +4588,13 @@ static int scst_sess_zero_dif_checks_failed(struct scst_sysfs_work_item *work)
PRINT_INFO("Zeroing DIF failures statistics for initiator "
"%s, target %s", sess->initiator_name, sess->tgt->tgt_name);
res = mutex_lock_interruptible(&scst_mutex);
if (res != 0)
goto out_put;
rcu_read_lock();
for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
struct list_head *head = &sess->sess_tgt_dev_list[t];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
atomic_set(&tgt_dev->tgt_dev_dif_app_failed_tgt, 0);
atomic_set(&tgt_dev->tgt_dev_dif_ref_failed_tgt, 0);
atomic_set(&tgt_dev->tgt_dev_dif_guard_failed_tgt, 0);
@@ -4618,12 +4606,10 @@ static int scst_sess_zero_dif_checks_failed(struct scst_sysfs_work_item *work)
atomic_set(&tgt_dev->tgt_dev_dif_guard_failed_dev, 0);
}
}
mutex_unlock(&scst_mutex);
rcu_read_unlock();
res = 0;
out_put:
kobject_put(&sess->sess_kobj);
TRACE_EXIT_RES(res);

View File

@@ -2283,14 +2283,14 @@ static int scst_report_luns_local(struct scst_cmd *cmd)
memset(buffer, 0, buffer_size);
offs = 8;
/*
* cmd won't allow to suspend activities, so we can access
* sess->sess_tgt_dev_list without any additional protection.
*/
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &cmd->sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
struct scst_tgt_dev_UA *ua;
if (!overflow) {
if ((buffer_size - offs) < 8) {
overflow = 1;
@@ -2303,34 +2303,8 @@ static int scst_report_luns_local(struct scst_cmd *cmd)
}
inc_dev_cnt:
dev_cnt++;
}
}
/* Set the response header */
dev_cnt *= 8;
put_unaligned_be32(dev_cnt, buffer);
scst_put_buf_full(cmd, buffer);
dev_cnt += 8;
if (dev_cnt < cmd->resp_data_len)
scst_set_resp_data_len(cmd, dev_cnt);
out_compl:
cmd->completed = 1;
/* Clear left sense_reported_luns_data_changed UA, if any. */
/*
* cmd won't allow to suspend activities, so we can access
* sess->sess_tgt_dev_list without any additional protection.
*/
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &cmd->sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
struct scst_tgt_dev_UA *ua;
/* Clear sense_reported_luns_data_changed UA. */
spin_lock_bh(&tgt_dev->tgt_dev_lock);
list_for_each_entry(ua, &tgt_dev->UA_list,
UA_list_entry) {
@@ -2348,6 +2322,20 @@ out_compl:
spin_unlock_bh(&tgt_dev->tgt_dev_lock);
}
}
rcu_read_unlock();
/* Set the response header */
dev_cnt *= 8;
put_unaligned_be32(dev_cnt, buffer);
scst_put_buf_full(cmd, buffer);
dev_cnt += 8;
if (dev_cnt < cmd->resp_data_len)
scst_set_resp_data_len(cmd, dev_cnt);
out_compl:
cmd->completed = 1;
/* Report the result */
cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
@@ -4513,6 +4501,7 @@ static int scst_pre_xmit_response1(struct scst_cmd *cmd)
* Those counters protect from not getting too long processing
* latency, so we should decrement them after cmd completed.
*/
smp_mb__before_atomic_dec();
atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
atomic_dec(&cmd->dev->dev_cmd_count);
@@ -4953,18 +4942,26 @@ out:
return;
}
/*
* Must be invoked either under RCU read lock or with sess->tgt_dev_list_mutex
* held.
*/
struct scst_tgt_dev *scst_lookup_tgt_dev(struct scst_session *sess, u64 lun)
{
struct list_head *head;
struct scst_tgt_dev *tgt_dev;
#ifdef CONFIG_SCST_EXTRACHECKS
if (scst_get_cmd_counter() == 0)
lockdep_assert_held(&scst_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
#if defined(CONFIG_SCST_EXTRACHECKS) && defined(CONFIG_PREEMPT_RCU) && \
defined(CONFIG_DEBUG_LOCK_ALLOC)
WARN_ON_ONCE(debug_locks &&
!lockdep_is_held(&sess->tgt_dev_list_mutex) &&
rcu_preempt_depth() == 0);
#endif
#endif
head = &sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(lun)];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head, sess_tgt_dev_list_entry) {
if (tgt_dev->lun == lun)
return tgt_dev;
}
@@ -4993,7 +4990,11 @@ static int scst_translate_lun(struct scst_cmd *cmd)
TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
(unsigned long long)cmd->lun);
res = -1;
rcu_read_lock();
tgt_dev = scst_lookup_tgt_dev(cmd->sess, cmd->lun);
rcu_read_unlock();
if (tgt_dev) {
TRACE_DBG("tgt_dev %p found", tgt_dev);
@@ -5852,7 +5853,10 @@ static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
if (unlikely(res != 0))
goto out;
rcu_read_lock();
tgt_dev = scst_lookup_tgt_dev(mcmd->sess, mcmd->lun);
rcu_read_unlock();
if (tgt_dev) {
TRACE_DBG("tgt_dev %p found", tgt_dev);
mcmd->mcmd_tgt_dev = tgt_dev;
@@ -6541,8 +6545,10 @@ static bool scst_is_cmd_belongs_to_dev(struct scst_cmd *cmd,
TRACE_DBG("Finding match for dev %s and cmd %p (lun %lld)",
dev->virt_name, cmd, (unsigned long long)cmd->lun);
rcu_read_lock();
tgt_dev = scst_lookup_tgt_dev(cmd->sess, cmd->lun);
res = tgt_dev && tgt_dev->dev == dev;
rcu_read_unlock();
TRACE_EXIT_HRES(res);
return res;
@@ -6919,7 +6925,6 @@ static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
return res;
}
/* scst_mutex supposed to be held */
static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
{
int i;
@@ -6928,14 +6933,17 @@ static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
TRACE_ENTRY();
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
scst_nexus_loss(tgt_dev,
(mcmd->fn != SCST_UNREG_SESS_TM));
}
}
rcu_read_unlock();
TRACE_EXIT();
return;
@@ -6960,12 +6968,12 @@ static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
sess, mcmd);
}
mutex_lock(&scst_mutex);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
__scst_abort_task_set(mcmd, tgt_dev);
scst_call_dev_task_mgmt_fn_received(mcmd, tgt_dev);
@@ -6985,10 +6993,9 @@ static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
}
}
}
rcu_read_unlock();
__scst_unblock_aborted_cmds(NULL, sess, NULL);
mutex_unlock(&scst_mutex);
scst_unblock_aborted_cmds(NULL, sess, NULL);
res = scst_set_mcmd_next_state(mcmd);
@@ -6996,7 +7003,6 @@ static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
return res;
}
/* scst_mutex supposed to be held */
static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
{
int i;
@@ -7005,17 +7011,19 @@ static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
TRACE_ENTRY();
rcu_read_lock();
list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
scst_nexus_loss(tgt_dev, true);
}
}
}
rcu_read_unlock();
TRACE_EXIT();
return;
@@ -7041,12 +7049,13 @@ static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
mutex_lock(&scst_mutex);
rcu_read_lock();
list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
__scst_abort_task_set(mcmd, tgt_dev);
@@ -7069,6 +7078,7 @@ static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
}
}
}
rcu_read_unlock();
__scst_unblock_aborted_cmds(tgt, NULL, NULL);
@@ -7329,8 +7339,6 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
TRACE_ENTRY();
mutex_lock(&scst_mutex);
switch (mcmd->fn) {
case SCST_NEXUS_LOSS_SESS:
case SCST_UNREG_SESS_TM:
@@ -7342,8 +7350,6 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
break;
}
mutex_unlock(&scst_mutex);
if (!mcmd->task_mgmt_fn_received_called)
goto tgt_done;
@@ -7363,6 +7369,7 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
struct scst_acg_dev *acg_dev;
mutex_lock(&scst_mutex);
rcu_read_lock();
list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
dev = acg_dev->dev;
list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
@@ -7373,6 +7380,7 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
}
}
}
rcu_read_unlock();
mutex_unlock(&scst_mutex);
break;
}
@@ -7380,15 +7388,15 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
case SCST_ABORT_ALL_TASKS_SESS:
case SCST_NEXUS_LOSS_SESS:
case SCST_UNREG_SESS_TM:
mutex_lock(&scst_mutex);
rcu_read_lock();
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &sess->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
list_for_each_entry_rcu(tgt_dev, head, sess_tgt_dev_list_entry) {
scst_call_dev_task_mgmt_fn_done(mcmd, tgt_dev);
}
}
mutex_unlock(&scst_mutex);
rcu_read_unlock();
break;
case SCST_ABORT_ALL_TASKS:
@@ -7398,11 +7406,12 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
struct scst_tgt *tgt = sess->tgt;
mutex_lock(&scst_mutex);
rcu_read_lock();
list_for_each_entry(s, &tgt->sess_list, sess_list_entry) {
for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
struct list_head *head = &s->sess_tgt_dev_list[i];
list_for_each_entry(tgt_dev, head,
list_for_each_entry_rcu(tgt_dev, head,
sess_tgt_dev_list_entry) {
if (mcmd->sess == tgt_dev->sess)
scst_call_dev_task_mgmt_fn_done(
@@ -7410,6 +7419,7 @@ static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
}
}
}
rcu_read_unlock();
mutex_unlock(&scst_mutex);
break;
}