Internal thread management reimplemented on top of the kthread*() API. Mostly done by Ming Zhang.

git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@58 d57e44dd-8a1f-0410-8b47-8ef2f437770f
Vladislav Bolkhovitin
2006-12-14 17:50:57 +00:00
parent f1e2c33ac7
commit b4e6b71a90
6 changed files with 241 additions and 236 deletions
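
The conversion follows the standard kthread pattern: each worker loops on kthread_should_stop() instead of a private shutdown flag, and the creator uses kthread_run()/kthread_stop() instead of kernel_thread() plus a wake-up, a thread counter and a shutdown semaphore. A minimal standalone sketch of that pattern (illustrative names, not the SCST functions themselves):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *thr;

static int my_cmd_thread(void *arg)
{
        /* The loop condition replaces the old driver-private "shutdown" flag. */
        while (!kthread_should_stop()) {
                /* wait for work, process queued commands, etc. */
                schedule_timeout_interruptible(HZ);
        }
        /* kthread_stop() in the caller waits for this return. */
        return 0;
}

static int start_thread(void)
{
        thr = kthread_run(my_cmd_thread, NULL, "my_thread");
        if (IS_ERR(thr))
                return PTR_ERR(thr);    /* no thread was created */
        return 0;
}

static void stop_thread(void)
{
        /* Wakes the thread and blocks until my_cmd_thread() returns. */
        kthread_stop(thr);
}

Because kthread_stop() both makes kthread_should_stop() return true and waits for the thread to exit, the per-device shutdown flag, the threads_count atomic and the shutdown_mutex/scst_shutdown_mutex semaphores removed in the hunks below are no longer needed.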

View File

@@ -2190,8 +2190,8 @@ struct proc_dir_entry *scst_create_proc_entry(struct proc_dir_entry * root,
* Adds and deletes (stops) num SCST's threads. Returns 0 on success,
* error code otherwise.
*/
int scst_add_threads(int num);
void scst_del_threads(int num);
int scst_add_cmd_threads(int num);
void scst_del_cmd_threads(int num);
void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
unsigned int len);

View File

@@ -34,6 +34,7 @@
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <asm/atomic.h>
#include <linux/kthread.h>
#define LOG_PREFIX "dev_fileio"
#include "scst_debug.h"
@@ -107,15 +108,13 @@ struct scst_fileio_dev {
struct scst_fileio_tgt_dev {
spinlock_t fdev_lock;
enum scst_cmd_queue_type last_write_cmd_queue_type;
int shutdown;
struct file *fd;
struct iovec *iv;
int iv_count;
struct list_head fdev_cmd_list;
wait_queue_head_t fdev_waitQ;
struct task_struct *cmd_thread;
struct scst_fileio_dev *virt_dev;
atomic_t threads_count;
struct semaphore shutdown_mutex;
struct list_head ftgt_list_entry;
};
@@ -198,8 +197,6 @@ static char *cdrom_fileio_proc_help_string =
"echo \"open|change|close NAME [FILE_NAME]\" "
">/proc/scsi_tgt/" CDROM_FILEIO_NAME "/" CDROM_FILEIO_NAME "\n";
#define FILEIO_THREAD_FLAGS CLONE_KERNEL
/**************************************************************
* Function: fileio_open
*
@@ -631,7 +628,7 @@ out:
static inline int test_cmd_list(struct scst_fileio_tgt_dev *ftgt_dev)
{
int res = !list_empty(&ftgt_dev->fdev_cmd_list) ||
unlikely(ftgt_dev->shutdown);
unlikely(kthread_should_stop());
return res;
}
@@ -641,13 +638,11 @@ static int fileio_cmd_thread(void *arg)
TRACE_ENTRY();
daemonize("scst_fileio");
recalc_sigpending();
set_user_nice(current, 10);
current->flags |= PF_NOFREEZE;
spin_lock_bh(&ftgt_dev->fdev_lock);
while (1) {
while (!kthread_should_stop()) {
wait_queue_t wait;
struct scst_cmd *cmd;
init_waitqueue_entry(&wait, current);
@@ -674,20 +669,15 @@ static int fileio_cmd_thread(void *arg)
spin_unlock_bh(&ftgt_dev->fdev_lock);
fileio_do_job(cmd);
spin_lock_bh(&ftgt_dev->fdev_lock);
if (unlikely(ftgt_dev->shutdown))
break;
}
if (unlikely(ftgt_dev->shutdown))
break;
}
spin_unlock_bh(&ftgt_dev->fdev_lock);
if (atomic_dec_and_test(&ftgt_dev->threads_count)) {
smp_mb__after_atomic_dec();
TRACE_DBG("%s", "Releasing shutdown_mutex");
up(&ftgt_dev->shutdown_mutex);
}
/*
* If kthread_should_stop() is true, we are guaranteed to be in
* suspended activity state, so fdev_cmd_list must be empty.
*/
sBUG_ON(!list_empty(&ftgt_dev->fdev_cmd_list));
TRACE_EXIT();
return 0;
@@ -713,8 +703,6 @@ static int fileio_attach_tgt(struct scst_tgt_dev *tgt_dev)
spin_lock_init(&ftgt_dev->fdev_lock);
INIT_LIST_HEAD(&ftgt_dev->fdev_cmd_list);
init_waitqueue_head(&ftgt_dev->fdev_waitQ);
atomic_set(&ftgt_dev->threads_count, 0);
init_MUTEX_LOCKED(&ftgt_dev->shutdown_mutex);
ftgt_dev->virt_dev = virt_dev;
if (!virt_dev->cdrom_empty) {
@@ -732,13 +720,12 @@ static int fileio_attach_tgt(struct scst_tgt_dev *tgt_dev)
* Only ONE thread must be run here, otherwise the commands could
* be executed out of order !!
*/
res = kernel_thread(fileio_cmd_thread, ftgt_dev, FILEIO_THREAD_FLAGS);
if (res < 0) {
PRINT_ERROR_PR("kernel_thread() failed: %d", res);
ftgt_dev->cmd_thread = kthread_run(fileio_cmd_thread, ftgt_dev, "scst_fileio");
if (IS_ERR(ftgt_dev->cmd_thread)) {
PRINT_ERROR_PR("kthread_run failed to create %s", "scst_fileio");
res = PTR_ERR(ftgt_dev->cmd_thread);
goto out_free_close;
}
res = 0;
atomic_inc(&ftgt_dev->threads_count);
tgt_dev->dh_priv = ftgt_dev;
@@ -773,9 +760,7 @@ static void fileio_detach_tgt(struct scst_tgt_dev *tgt_dev)
list_del(&ftgt_dev->ftgt_list_entry);
up(&virt_dev->ftgt_list_mutex);
ftgt_dev->shutdown = 1;
wake_up_all(&ftgt_dev->fdev_waitQ);
down(&ftgt_dev->shutdown_mutex);
kthread_stop(ftgt_dev->cmd_thread);
if (ftgt_dev->fd)
filp_close(ftgt_dev->fd, NULL);
@@ -2800,9 +2785,7 @@ static int cdrom_fileio_change(char *p, char *name)
virt_dev->media_changed = 1;
down(&virt_dev->ftgt_list_mutex);
list_for_each_entry(ftgt_dev, &virt_dev->ftgt_list,
ftgt_list_entry)
{
list_for_each_entry(ftgt_dev, &virt_dev->ftgt_list, ftgt_list_entry) {
if (!virt_dev->cdrom_empty) {
fd = fileio_open(virt_dev);
if (IS_ERR(fd)) {
@@ -3116,12 +3099,7 @@ static void __exit exit_scst_fileio_driver(void)
* Wait for one second to allow the thread(s) to actually exit;
* otherwise we can get an Oops. Any better way?
*/
{
unsigned long t = jiffies;
TRACE_DBG("%s", "Waiting 1 sec...");
while ((jiffies - t) < HZ)
schedule();
}
schedule_timeout(HZ);
}
module_init(init_scst_fileio_driver);

View File

@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <asm/unistd.h>
#include <asm/string.h>
#include <linux/kthread.h>
#include "scst_debug.h"
#include "scsi_tgt.h"
@@ -93,12 +94,9 @@ DECLARE_WAIT_QUEUE_HEAD(scst_mgmt_waitQ);
spinlock_t scst_mgmt_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(scst_sess_mgmt_list);
struct semaphore *scst_shutdown_mutex = NULL;
static int scst_threads;
struct scst_threads_info_t scst_threads_info;
int scst_threads;
atomic_t scst_threads_count = ATOMIC_INIT(0);
int scst_shut_threads_count;
int scst_thread_num;
static int suspend_count;
int scst_virt_dev_last_id = 1; /* protected by scst_mutex */
@@ -832,21 +830,87 @@ out_err_detach_tgt:
goto out_resume;
}
int scst_add_threads(int num)
int scst_cmd_threads_count(void)
{
int res = 0, i;
int i;
/* Just to narrow the race window in which the user can read a value that is being changed */
down(&scst_threads_info.cmd_threads_mutex);
i = scst_threads_info.nr_cmd_threads;
up(&scst_threads_info.cmd_threads_mutex);
return i;
}
static void scst_threads_info_init(void)
{
memset(&scst_threads_info, 0, sizeof(scst_threads_info));
init_MUTEX(&scst_threads_info.cmd_threads_mutex);
INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
}
/* scst_threads_info.cmd_threads_mutex supposed to be held */
void __scst_del_cmd_threads(int num)
{
struct scst_cmd_thread_t *ct, *tmp;
int i;
TRACE_ENTRY();
for (i = 0; i < num; i++) {
res = kernel_thread(scst_cmd_thread, 0, SCST_THREAD_FLAGS);
if (res < 0) {
PRINT_ERROR_PR("kernel_thread() failed: %d", res);
goto out_error;
}
atomic_inc(&scst_threads_count);
i = scst_threads_info.nr_cmd_threads;
if (num <= 0 || num > i) {
PRINT_ERROR_PR("can not del %d cmd threads from %d", num, i);
return;
}
list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
thread_list_entry) {
int res;
res = kthread_stop(ct->cmd_thread);
if (res < 0) {
TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
}
list_del(&ct->thread_list_entry);
kfree(ct);
scst_threads_info.nr_cmd_threads--;
--num;
if (num == 0)
break;
}
TRACE_EXIT();
return;
}
/* scst_threads_info.cmd_threads_mutex supposed to be held */
int __scst_add_cmd_threads(int num)
{
int res = 0, i;
static int scst_thread_num = 0;
TRACE_ENTRY();
for (i = 0; i < num; i++) {
struct scst_cmd_thread_t *thr;
thr = kmalloc(sizeof(*thr), GFP_KERNEL);
if (!thr) {
res = -ENOMEM;
PRINT_ERROR_PR("fail to allocate thr %d", res);
goto out_error;
}
thr->cmd_thread = kthread_run(scst_cmd_thread, 0, "scsi_tgt%d",
scst_thread_num++);
if (IS_ERR(thr->cmd_thread)) {
res = PTR_ERR(thr->cmd_thread);
PRINT_ERROR_PR("kthread_create() failed: %d", res);
kfree(thr);
goto out_error;
}
list_add(&thr->thread_list_entry,
&scst_threads_info.cmd_threads_list);
scst_threads_info.nr_cmd_threads++;
}
res = 0;
out:
@@ -855,24 +919,87 @@ out:
out_error:
if (i > 0)
scst_del_threads(i-1);
__scst_del_cmd_threads(i - 1);
goto out;
}
void scst_del_threads(int num)
int scst_add_cmd_threads(int num)
{
int res;
TRACE_ENTRY();
down(&scst_threads_info.cmd_threads_mutex);
res = __scst_add_cmd_threads(num);
up(&scst_threads_info.cmd_threads_mutex);
TRACE_EXIT_RES(res);
return res;
}
void scst_del_cmd_threads(int num)
{
TRACE_ENTRY();
spin_lock_irq(&scst_list_lock);
scst_shut_threads_count += num;
spin_unlock_irq(&scst_list_lock);
wake_up_nr(&scst_list_waitQ, num);
down(&scst_threads_info.cmd_threads_mutex);
__scst_del_cmd_threads(num);
up(&scst_threads_info.cmd_threads_mutex);
TRACE_EXIT();
return;
}
static void scst_stop_all_threads(void)
{
TRACE_ENTRY();
down(&scst_threads_info.cmd_threads_mutex);
__scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
if (scst_threads_info.mgmt_cmd_thread)
kthread_stop(scst_threads_info.mgmt_cmd_thread);
if (scst_threads_info.mgmt_thread)
kthread_stop(scst_threads_info.mgmt_thread);
up(&scst_threads_info.cmd_threads_mutex);
TRACE_EXIT();
return;
}
static int scst_start_all_threads(int num)
{
int res;
TRACE_ENTRY();
down(&scst_threads_info.cmd_threads_mutex);
res = __scst_add_cmd_threads(num);
if (res < 0)
goto out;
scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
NULL, "scsi_tgt_mc");
if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
PRINT_ERROR_PR("kthread_create() for mcmd failed: %d", res);
scst_threads_info.mgmt_cmd_thread = NULL;
goto out;
}
scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
NULL, "scsi_tgt_mgmt");
if (IS_ERR(scst_threads_info.mgmt_thread)) {
res = PTR_ERR(scst_threads_info.mgmt_thread);
PRINT_ERROR_PR("kthread_create() for mgmt failed: %d", res);
scst_threads_info.mgmt_thread = NULL;
goto out;
}
out:
up(&scst_threads_info.cmd_threads_mutex);
TRACE_EXIT_RES(res);
return res;
}
void scst_get(void)
{
scst_inc_cmd_count();
@@ -969,10 +1096,11 @@ static int __init init_scst(void)
if (scst_threads < scst_num_cpus) {
PRINT_ERROR_PR("%s", "scst_threads can not be less than "
"CPUs count");
res = -EFAULT;
goto out;
scst_threads = scst_num_cpus;
}
scst_threads_info_init();
#define INIT_CACHEP(p, s, t, o) do { \
p = kmem_cache_create(s, sizeof(struct t), 0, \
SCST_SLAB_FLAGS, NULL, NULL); \
@@ -1024,38 +1152,19 @@ static int __init init_scst(void)
scst_scsi_op_list_init();
res = scst_proc_init_module();
if (res != 0)
goto out_unreg_interface;
for (i = 0; i < sizeof(scst_tasklets)/sizeof(scst_tasklets[0]); i++)
tasklet_init(&scst_tasklets[i], (void *)scst_cmd_tasklet, 0);
TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
scst_threads);
for (i = 0; i < scst_threads; i++) {
res = kernel_thread(scst_cmd_thread, NULL, SCST_THREAD_FLAGS);
if (res < 0) {
PRINT_ERROR_PR("kernel_thread() failed: %d", res);
goto out_thread_free;
}
atomic_inc(&scst_threads_count);
}
for (i = 0; i < sizeof(scst_tasklets) / sizeof(scst_tasklets[0]); i++)
tasklet_init(&scst_tasklets[i], (void *)scst_cmd_tasklet, 0);
res = kernel_thread(scst_mgmt_cmd_thread, NULL, SCST_THREAD_FLAGS);
if (res < 0) {
PRINT_ERROR_PR("kernel_thread() for mcmd failed: %d", res);
res = scst_start_all_threads(scst_threads);
if (res < 0)
goto out_thread_free;
}
atomic_inc(&scst_threads_count);
res = kernel_thread(scst_mgmt_thread, NULL, SCST_THREAD_FLAGS);
if (res < 0) {
PRINT_ERROR_PR("kernel_thread() for mgmt failed: %d", res);
res = scst_proc_init_module();
if (res != 0)
goto out_thread_free;
}
atomic_inc(&scst_threads_count);
if (scst_max_cmd_mem == 0) {
struct sysinfo si;
@@ -1079,24 +1188,8 @@ out:
return res;
out_thread_free:
if (atomic_read(&scst_threads_count)) {
DECLARE_MUTEX_LOCKED(shm);
scst_shutdown_mutex = &shm;
smp_mb();
set_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
scst_stop_all_threads();
wake_up_all(&scst_list_waitQ);
wake_up_all(&scst_mgmt_cmd_list_waitQ);
wake_up_all(&scst_mgmt_waitQ);
TRACE_DBG("Waiting for %d threads to complete",
atomic_read(&scst_threads_count));
down(&shm);
}
scst_proc_cleanup_module();
out_unreg_interface:
scsi_unregister_interface(&scst_interface);
out_free_acg:
@@ -1142,37 +1235,15 @@ static void __exit exit_scst(void)
/* ToDo: unregister_cpu_notifier() */
scst_shutdown_mutex = &shm;
smp_mb();
set_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
wake_up_all(&scst_list_waitQ);
wake_up_all(&scst_mgmt_cmd_list_waitQ);
wake_up_all(&scst_mgmt_waitQ);
if (atomic_read(&scst_threads_count)) {
TRACE_DBG("Waiting for %d threads to complete",
atomic_read(&scst_threads_count));
down(&shm);
/*
* Wait for one sec. to allow the thread(s) actually exit,
* otherwise we can get Oops. Any better way?
*/
{
unsigned long t = jiffies;
TRACE_DBG("%s", "Waiting 1 sec...");
while((jiffies - t) < HZ)
schedule();
}
}
if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
cancel_delayed_work(&scst_cmd_mem_work);
flush_scheduled_work();
}
scst_proc_cleanup_module();
scst_stop_all_threads();
scsi_unregister_interface(&scst_interface);
scst_destroy_acg(scst_default_acg);
@@ -1246,8 +1317,8 @@ EXPORT_SYMBOL(scst_find_cmd_by_tag);
EXPORT_SYMBOL(scst_suspend_activity);
EXPORT_SYMBOL(scst_resume_activity);
EXPORT_SYMBOL(scst_add_threads);
EXPORT_SYMBOL(scst_del_threads);
EXPORT_SYMBOL(scst_add_cmd_threads);
EXPORT_SYMBOL(scst_del_cmd_threads);
#if defined(DEBUG) || defined(TRACING)
EXPORT_SYMBOL(scst_proc_log_entry_read);

View File

@@ -66,13 +66,6 @@
/* Set if new commands initialization should be suspended for a while */
#define SCST_FLAG_SUSPENDED 0
/*
* If set, SCST's threads exit immediately not performing any
* sessions' shutdown tasks, therefore at this point all the sessions
* must be already down.
*/
#define SCST_FLAG_SHUTDOWN 1
/* Set if a TM command is being performed */
#define SCST_FLAG_TM_ACTIVE 2
@@ -96,8 +89,6 @@
**/
#define SCST_MAX_DEVICE_COMMANDS 128
#define SCST_THREAD_FLAGS CLONE_KERNEL
#define SCST_TGT_RETRY_TIMEOUT (3/2*HZ)
#define SCST_CMD_MEM_TIMEOUT (120*HZ)
@@ -170,12 +161,23 @@ extern wait_queue_head_t scst_mgmt_waitQ;
extern spinlock_t scst_mgmt_lock;
extern struct list_head scst_sess_mgmt_list;
extern int scst_threads;
extern int scst_shut_threads_count;
extern atomic_t scst_threads_count;
extern int scst_thread_num;
struct scst_cmd_thread_t {
struct task_struct *cmd_thread;
struct list_head thread_list_entry;
};
extern struct semaphore *scst_shutdown_mutex;
struct scst_threads_info_t {
struct semaphore cmd_threads_mutex;
u32 nr_cmd_threads;
struct list_head cmd_threads_list;
struct task_struct *mgmt_thread;
struct task_struct *mgmt_cmd_thread;
};
extern struct scst_threads_info_t scst_threads_info;
extern int scst_cmd_threads_count(void);
extern int __scst_add_cmd_threads(int num);
extern void __scst_del_cmd_threads(int num);
extern spinlock_t scst_temp_UA_lock;
extern uint8_t scst_temp_UA[SCSI_SENSE_BUFFERSIZE];

View File

@@ -770,25 +770,26 @@ static int scst_proc_threads_write(struct file *file, const char __user *buf,
goto out_free;
}
oldtn = atomic_read(&scst_threads_count);
newtn = simple_strtoul(buffer, NULL, 0) + 2; /* 2 mgmt threads */
down(&scst_threads_info.cmd_threads_mutex);
oldtn = scst_threads_info.nr_cmd_threads;
newtn = simple_strtoul(buffer, NULL, 0);
if (newtn <= 0) {
PRINT_ERROR_PR("Illegal threads num value %d", newtn);
res = -EINVAL;
goto out_up_free;
goto out_up_thr_free;
}
delta = newtn - oldtn;
if (delta < 0) {
scst_del_threads(-delta);
}
else {
scst_add_threads(delta);
}
if (delta < 0)
__scst_del_cmd_threads(-delta);
else
__scst_add_cmd_threads(delta);
PRINT_INFO_PR("Changed threads num: old %d, new %d(%d)", oldtn, newtn,
atomic_read(&scst_threads_count));
PRINT_INFO_PR("Changed cmd threads num: old %d, new %d", oldtn, newtn);
out_up_thr_free:
up(&scst_threads_info.cmd_threads_mutex);
out_up_free:
up(&scst_proc_mutex);
out_free:
@@ -1925,8 +1926,7 @@ static int scst_threads_info_show(struct seq_file *seq, void *v)
{
TRACE_ENTRY();
/* 2 mgmt threads */
seq_printf(seq, "%d\n", atomic_read(&scst_threads_count) - 2);
seq_printf(seq, "%d\n", scst_cmd_threads_count());
TRACE_EXIT();
return 0;

View File

@@ -25,6 +25,7 @@
#include <linux/smp_lock.h>
#include <asm/unistd.h>
#include <asm/string.h>
#include <linux/kthread.h>
#include "scst_debug.h"
#include "scsi_tgt.h"
@@ -80,21 +81,7 @@ static inline void scst_schedule_tasklet(void)
{
struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
#if 0 /* Looks like #else is better for performance */
if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
tasklet_schedule(t);
else {
/*
* We suppose that other CPU(s) are rather idle, so we
* ask one of them to help
*/
TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
"instead", smp_processor_id());
wake_up(&scst_list_waitQ);
}
#else
tasklet_schedule(t);
#endif
}
/*
@@ -2683,29 +2670,20 @@ static inline int test_cmd_lists(void)
int res = !list_empty(&scst_active_cmd_list) ||
(!list_empty(&scst_init_cmd_list) &&
!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
unlikely(scst_shut_threads_count > 0) ||
unlikely(kthread_should_stop()) ||
tm_dbg_is_release();
return res;
}
int scst_cmd_thread(void *arg)
{
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
int n;
TRACE_ENTRY();
spin_lock(&lock);
n = scst_thread_num++;
spin_unlock(&lock);
daemonize("scsi_tgt%d", n);
recalc_sigpending();
set_user_nice(current, 10);
current->flags |= PF_NOFREEZE;
spin_lock_irq(&scst_list_lock);
while (1) {
while (!kthread_should_stop()) {
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
@@ -2725,26 +2703,18 @@ int scst_cmd_thread(void *arg)
scst_do_job_init();
scst_do_job_active(SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
list_empty(&scst_cmd_list) &&
list_empty(&scst_active_cmd_list) &&
list_empty(&scst_init_cmd_list)) {
break;
}
if (unlikely(scst_shut_threads_count > 0)) {
scst_shut_threads_count--;
break;
}
}
spin_unlock_irq(&scst_list_lock);
if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
smp_mb__after_atomic_dec();
TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
up(scst_shutdown_mutex);
}
/*
* If kthread_should_stop() is true, we are either in module
* unload, or there is at least one other thread left to
* process the command lists.
*/
sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
(!list_empty(&scst_cmd_list) ||
!list_empty(&scst_active_cmd_list) ||
!list_empty(&scst_init_cmd_list)));
TRACE_EXIT();
return 0;
@@ -3548,7 +3518,7 @@ static inline int test_mgmt_cmd_list(void)
{
int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
unlikely(kthread_should_stop());
return res;
}
@@ -3558,12 +3528,10 @@ int scst_mgmt_cmd_thread(void *arg)
TRACE_ENTRY();
daemonize("scsi_tgt_mc");
recalc_sigpending();
current->flags |= PF_NOFREEZE;
spin_lock_irq(&scst_list_lock);
while (1) {
while (!kthread_should_stop()) {
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
@@ -3602,20 +3570,14 @@ int scst_mgmt_cmd_thread(void *arg)
&scst_active_mgmt_cmd_list);
}
}
if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
list_empty(&scst_active_mgmt_cmd_list))
{
break;
}
}
spin_unlock_irq(&scst_list_lock);
if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
smp_mb__after_atomic_dec();
TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
up(scst_shutdown_mutex);
}
/*
* If kthread_should_stop() is true, we are guaranteed to be in
* module unload, so scst_active_mgmt_cmd_list must be empty.
*/
sBUG_ON(!list_empty(&scst_active_mgmt_cmd_list));
TRACE_EXIT();
return 0;
@@ -3977,7 +3939,7 @@ void scst_unregister_session(struct scst_session *sess, int wait,
static inline int test_mgmt_list(void)
{
int res = !list_empty(&scst_sess_mgmt_list) ||
test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
unlikely(kthread_should_stop());
return res;
}
@@ -3987,12 +3949,10 @@ int scst_mgmt_thread(void *arg)
TRACE_ENTRY();
daemonize("scsi_tgt_mgmt");
recalc_sigpending();
current->flags |= PF_NOFREEZE;
spin_lock_irq(&scst_mgmt_lock);
while (1) {
while (!kthread_should_stop()) {
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
@@ -4032,20 +3992,14 @@ restart:
spin_lock_irq(&scst_mgmt_lock);
goto restart;
}
if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
list_empty(&scst_sess_mgmt_list))
{
break;
}
}
spin_unlock_irq(&scst_mgmt_lock);
if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
smp_mb__after_atomic_dec();
TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
up(scst_shutdown_mutex);
}
/*
* If kthread_should_stop() is true, we are guaranteed to be in
* module unload, so scst_sess_mgmt_list must be empty.
*/
sBUG_ON(!list_empty(&scst_sess_mgmt_list));
TRACE_EXIT();
return 0;