scst: Use a per-cpu counter to track the number of commands per device

git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@8027 d57e44dd-8a1f-0410-8b47-8ef2f437770f
commit 759bdca73a (parent 53fdc293e2)
Author: Bart Van Assche
Date:   2019-03-10 01:24:43 +00:00
3 changed files with 54 additions and 14 deletions
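Note: the patch replaces the optional atomic_t command counter with an always-present percpu_ref and frees the device asynchronously once the last reference is dropped. Below is a minimal, self-contained sketch of that lifecycle; all demo_* names are illustrative and are not SCST code.

#include <linux/percpu-refcount.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Toy device structure mirroring the shape of the change below. */
struct demo_dev {
	struct percpu_ref cmd_count;	/* commands in flight, plus one base ref */
	struct work_struct free_work;
	struct completion *freed;
};

/* Work item: runs in process context, so sleeping cleanup is allowed. */
static void demo_free_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, free_work);
	struct completion *c = dev->freed;

	percpu_ref_exit(&dev->cmd_count);	/* frees the per-CPU counters */
	kfree(dev);
	if (c)
		complete(c);			/* c lives on the waiter's stack */
}

/* Release callback: may be invoked from RCU callback (softirq) context,
 * so it must not sleep and only defers the real cleanup. */
static void demo_release(struct percpu_ref *ref)
{
	struct demo_dev *dev = container_of(ref, struct demo_dev, cmd_count);

	schedule_work(&dev->free_work);
}

static struct demo_dev *demo_create(void)
{
	struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	INIT_WORK(&dev->free_work, demo_free_work);
	/* Start in atomic mode: the refcount is then one exact counter. */
	if (percpu_ref_init(&dev->cmd_count, demo_release,
			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL) < 0) {
		kfree(dev);
		return NULL;
	}
	return dev;
}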


@@ -2876,11 +2876,13 @@ struct scst_device {
 	/* Device lock */
 	spinlock_t dev_lock ____cacheline_aligned_in_smp;
 
-#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-	/* How many cmds alive on this dev */
-	atomic_t dev_cmd_count;
-#endif
+	/* One more than the number of commands associated with this device. */
+	struct percpu_ref dev_cmd_count;
+	struct work_struct free_work;
+	struct completion *dev_freed_cmpl;
 
 	/*
 	 * Maximum count of uncompleted commands that an initiator could
 	 * queue on this device. Then it will start getting TASK QUEUE FULL
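Note on the field choice: this struct is ____cacheline_aligned_in_smp precisely because it sits on the command hot path, and an atomic_t counter makes every CPU bounce the same cache line on every command. A percpu_ref in per-CPU mode (the mode used when the command-count limit is compiled out; with CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT the ref stays atomic, see below) reduces get/put to local per-CPU updates. A sketch of hypothetical hot-path wrappers, reusing the demo_dev names above:

/* Hypothetical wrappers, only to show the hot-path cost. */
static inline void demo_cmd_start(struct demo_dev *dev)
{
	percpu_ref_get(&dev->cmd_count);	/* per-CPU mode: a local increment */
}

static inline void demo_cmd_done(struct demo_dev *dev)
{
	percpu_ref_put(&dev->cmd_count);	/* last put invokes the release cb */
}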


@@ -4181,6 +4181,32 @@ static int scst_dif_none_type1(struct scst_cmd *cmd);
 #define scst_dif_none_type1 scst_dif_none
 #endif
 
+/* Called from thread context and hence may sleep. */
+static void scst_finally_free_device(struct work_struct *work)
+{
+	struct scst_device *dev = container_of(work, typeof(*dev),
+					       free_work);
+	struct completion *c = dev->dev_freed_cmpl;
+
+	scst_pr_cleanup(dev);
+	kfree(dev->virt_name);
+	percpu_ref_exit(&dev->dev_cmd_count);
+	kmem_cache_free(scst_dev_cachep, dev);
+	if (c)
+		complete(c);
+}
+
+/* RCU callback. Must not sleep. */
+static void scst_release_device(struct percpu_ref *ref)
+{
+	struct scst_device *dev;
+
+	dev = container_of(ref, typeof(*dev), dev_cmd_count);
+	schedule_work(&dev->free_work);
+}
+
 int scst_alloc_device(gfp_t gfp_mask, int nodeid, struct scst_device **out_dev)
 {
 	struct scst_device *dev;
@@ -4199,8 +4225,13 @@ int scst_alloc_device(gfp_t gfp_mask, int nodeid, struct scst_device **out_dev)
 	memset(dev, 0, sizeof(*dev));
 	dev->handler = &scst_null_devtype;
-#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-	atomic_set(&dev->dev_cmd_count, 0);
-#endif
+	INIT_WORK(&dev->free_work, scst_finally_free_device);
+	res = percpu_ref_init(&dev->dev_cmd_count, scst_release_device,
+			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+	if (res < 0)
+		goto free_dev;
+#ifndef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
+	percpu_ref_switch_to_percpu(&dev->dev_cmd_count);
+#endif
 	scst_init_mem_lim(&dev->dev_mem_lim);
 	spin_lock_init(&dev->dev_lock);
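Note on the mode choice: percpu_ref_init() starts the counter in atomic mode. When CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT is off, nothing ever needs to read the exact value, so the ref is switched to the cheaper per-CPU mode; when it is on, the ref stays atomic so the limit check can read an exact count. A sketch of a hypothetical read helper, assuming the kernel layout this patch targets, where struct percpu_ref still embeds its atomic_long_t count directly:

/* Hypothetical helper: an exact value is only available in atomic mode. */
static long demo_read_cmd_count(struct demo_dev *dev)
{
	unsigned long __percpu *pcpu;

	if (__ref_is_percpu(&dev->cmd_count, &pcpu))
		return -1;	/* per-CPU mode: no cheap aggregate value */

	/* Subtract the base reference taken by percpu_ref_init(). */
	return atomic_long_read(&dev->cmd_count.count) - 1;
}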
@@ -4234,10 +4265,16 @@ int scst_alloc_device(gfp_t gfp_mask, int nodeid, struct scst_device **out_dev)
out:
TRACE_EXIT_RES(res);
return res;
free_dev:
kmem_cache_free(scst_dev_cachep, dev);
goto out;
}
void scst_free_device(struct scst_device *dev)
{
DECLARE_COMPLETION_ONSTACK(c);
TRACE_ENTRY();
EXTRACHECKS_BUG_ON(dev->dev_scsi_atomic_cmd_active != 0);
@@ -4257,11 +4294,11 @@ void scst_free_device(struct scst_device *dev)
scst_deinit_threads(&dev->dev_cmd_threads);
scst_pr_cleanup(dev);
kfree(dev->virt_name);
kmem_cache_free(scst_dev_cachep, dev);
dev->dev_freed_cmpl = &c;
percpu_ref_kill(&dev->dev_cmd_count);
wait_for_completion(&c);
TRACE_EXIT();
return;
}
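Note on the teardown ordering: the rewritten scst_free_device() is the "kill and wait" half of the pattern. percpu_ref_kill() drops the base reference and forbids new references, so the count can only fall; when the last outstanding command drops its reference, the release callback fires and the work item frees the device and completes dev_freed_cmpl. Because the completion lives on the caller's stack, it can safely be signalled even after the device itself has been freed. A condensed sketch, reusing the demo names above:

static void demo_destroy(struct demo_dev *dev)
{
	DECLARE_COMPLETION_ONSTACK(c);

	dev->freed = &c;
	percpu_ref_kill(&dev->cmd_count);	/* drop base ref; no new gets */
	wait_for_completion(&c);		/* returns only after demo_free_work() */
	/* dev must not be touched here: demo_free_work() already freed it */
}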


@@ -4503,9 +4503,7 @@ static int scst_pre_xmit_response1(struct scst_cmd *cmd)
 	 */
 	smp_mb__before_atomic_dec();
 	atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
-#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-	atomic_dec(&cmd->dev->dev_cmd_count);
-#endif
+	percpu_ref_put(&cmd->dev->dev_cmd_count);
 
 	if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
 		scst_on_hq_cmd_response(cmd);
 	else if (unlikely(!cmd->sent_for_exec)) {
@@ -5134,6 +5132,7 @@ static int __scst_init_cmd(struct scst_cmd *cmd)
 	if (likely(res == 0)) {
 		struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
 		struct scst_device *dev = cmd->dev;
+		unsigned long __percpu *a __maybe_unused;
 		bool failure = false;
 		int cnt;
@@ -5149,8 +5148,10 @@ static int __scst_init_cmd(struct scst_cmd *cmd)
 			failure = true;
 		}
 
+		percpu_ref_get(&dev->dev_cmd_count);
 #ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-		cnt = atomic_inc_return(&dev->dev_cmd_count);
+		sBUG_ON(__ref_is_percpu(&dev->dev_cmd_count, &a));
+		cnt = atomic_long_read(&dev->dev_cmd_count.count);
 		if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
 			if (!failure) {
 				TRACE(TRACE_FLOW_CONTROL,
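Note on the limit check: with the ref held in atomic mode, percpu_ref_get() boils down to an atomic increment of one counter, so reading the counter immediately afterwards yields "commands in flight + 1" (the base reference) including this command, close to what the old atomic_inc_return() returned. The get and the read are no longer one indivisible operation, so the value can be slightly stale under concurrency, but for flow control that is harmless. A hedged sketch of the check, using the demo names above (SCST_MAX_DEV_COMMANDS is the existing SCST limit):

/* Returns true if the device is over its command limit and the
 * initiator should start getting queue-full status. */
static bool demo_over_limit(struct demo_dev *dev)
{
	long cnt;

	percpu_ref_get(&dev->cmd_count);	/* atomic mode: atomic increment */
	cnt = atomic_long_read(&dev->cmd_count.count);

	return cnt > SCST_MAX_DEV_COMMANDS;
}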