- ISCSI sending response timeout increased to 30 seconds

- Fixed 2 problems in scst_user on release() cleanup
- Added per-device memory limit and a new scst.ko module parameter scst_max_dev_cmd_mem
 - Cleanups, including important ones
 - Version changed to 1.0.0-rc1


git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@411 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2008-06-12 18:55:13 +00:00
parent 2cd7071377
commit da47dc7a6c
19 changed files with 423 additions and 238 deletions

View File

@@ -2,7 +2,7 @@
USER SPACE INTERFACE DESCRIPTION.
Version 0.9.6/4
Version 1.0.0
I. Description.

View File

@@ -1,8 +1,8 @@
iSCSI SCST target driver
========================
Version 0.9.6/XXXX, XX XXX 200X
-------------------------------
Version 1.0.0/0.4.16r151, XX June 2008
--------------------------------------
This driver is a forked with all respects version of iSCSI Enterprise
Target (IET) (http://iscsitarget.sourceforge.net/) with updates to work
@@ -21,7 +21,7 @@ simultaneously all the driver's modules and files were renamed:
* iscsi-target -> iscsi-scst
* iscsi-target.ko -> iscsi-scst.ko
This version is compatible with SCST version 0.9.6 and higher.
This version is compatible with SCST version 1.0.0 and higher.
Tested on 2.6.21.1 kernel, but it should also work on other versions,
starting from 2.6.16.x.

View File

@@ -13,4 +13,4 @@
* GNU General Public License for more details.
*/
#define ISCSI_VERSION_STRING "0.9.6/0.4.16r151"
#define ISCSI_VERSION_STRING "1.0.0/0.4.16r151"

View File

@@ -1569,7 +1569,7 @@ static void __cmnd_abort(struct iscsi_cmnd *cmnd)
* it. But, since this function can be called from any thread, not only
* from the read one, we at the moment can't do that, because of
* absence of appropriate locking protection. But this isn't a stuff
* for 0.9.6. So, currently a misbehaving initiator, not sending
* for 1.0.0. So, currently a misbehaving initiator, not sending
* data in R2T state for a sharing between targets device, for which
* for some reason an aborting TM command, e.g. TARGET RESET, from
* another initiator is issued, can block response for this TM command

View File

@@ -324,7 +324,7 @@ struct iscsi_cmnd {
/* Flags for req_cmnd_release_force() */
#define ISCSI_FORCE_RELEASE_WRITE 1
#define ISCSI_RSP_TIMEOUT (7*HZ)
#define ISCSI_RSP_TIMEOUT (30 * HZ)
extern struct mutex target_mgmt_mutex;

View File

@@ -1,8 +1,8 @@
Target driver for Qlogic 2200/2300 Fibre Channel cards
======================================================
Version 0.9.6, XX XXX 200X
--------------------------
Version 1.0.0, XX June 2008
---------------------------
This driver has all required features and looks to be quite stable (for
beta) and useful. It consists from two parts: the target mode driver
@@ -13,7 +13,7 @@ all necessary callbacks, but it's still capable to work as initiator
only. Mode, when a host acts as the initiator and the target
simultaneously, is supported as well.
This version is compatible with SCST version 0.9.5 and higher.
This version is compatible with SCST version 1.0.0 and higher.
The original initiator driver was taken from the kernel 2.6.17.8.

View File

@@ -38,7 +38,7 @@
/* Version numbers, the same as for the kernel */
#define Q2T_VERSION(a,b,c,d) (((a) << 030) + ((b) << 020) + (c) << 010 + (d))
#define Q2T_VERSION_CODE Q2T_VERSION(0,9,6,0)
#define Q2T_VERSION_STRING "0.9.6-pre1"
#define Q2T_VERSION_STRING "1.0.0-rc1"
#define Q2T_MAX_CDB_LEN 16
#define Q2T_TIMEOUT 10 /* in seconds */

View File

@@ -1,8 +1,8 @@
Generic SCSI target mid-level for Linux (SCST)
==============================================
Version 0.9.6, XX XXX 200X
--------------------------
Version 1.0.0, XX June 2008
---------------------------
SCST is designed to provide unified, consistent interface between SCSI
target drivers and Linux kernel and simplify target drivers development

View File

@@ -50,7 +50,7 @@ typedef _Bool bool;
/* Version numbers, the same as for the kernel */
#define SCST_VERSION_CODE 0x00090601
#define SCST_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + d)
#define SCST_VERSION_STRING "0.9.6-rc1"
#define SCST_VERSION_STRING "1.0.0-rc1"
#define SCST_INTERFACE_VERSION SCST_VERSION_STRING "$Revision$" SCST_CONST_VERSION
/*************************************************************
@@ -98,7 +98,6 @@ typedef _Bool bool;
#define SCST_CMD_STATE_LAST_ACTIVE (SCST_CMD_STATE_FINISHED+100)
/* A cmd is created, but scst_cmd_init_done() not called */
#define SCST_CMD_STATE_INIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+1)
@@ -1302,6 +1301,17 @@ struct scst_mgmt_cmd {
void *tgt_priv;
};
struct scst_mem_lim {
/* How much memory allocated under this object */
atomic_t alloced_pages;
/*
* How much memory allowed to allocated under this object. Put here
* mostly to save a possible cache miss accessing scst_max_dev_cmd_mem.
*/
unsigned int max_allowed_pages;
};
struct scst_device {
struct scst_dev_type *handler; /* corresponding dev handler */
@@ -1317,6 +1327,8 @@ struct scst_device {
/* How many write cmds alive on this dev. Temporary, ToDo */
atomic_t write_cmd_count;
struct scst_mem_lim dev_mem_lim;
unsigned short type; /* SCSI type of the device */
/*************************************************************
@@ -2480,6 +2492,18 @@ void scst_process_active_cmd(struct scst_cmd *cmd, int context);
*/
int scst_check_local_events(struct scst_cmd *cmd);
/*
* Returns the next state of the SCSI target state machine in case if command's
* completed abnormally.
*/
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd);
/*
* Sets state of the SCSI target state machine in case if command's completed
* abnormally.
*/
void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd);
/*
* Returns target driver's root entry in SCST's /proc hierarchy.
* The driver can create own files/directoryes here, which should
@@ -2566,8 +2590,8 @@ void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
unsigned int len);
/*
* Returnes a pseudo-random number for debugging purposes. Available only with
* DEBUG on
* Returnes a pseudo-random number for debugging purposes. Available only in
* the DEBUG build.
*/
unsigned long scst_random(void);
@@ -2578,16 +2602,6 @@ unsigned long scst_random(void);
*/
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len);
/*
* Checks if total memory allocated by commands is less, than defined
* limit (scst_cur_max_cmd_mem) and returns 0, if it is so. Otherwise,
* returnes 1 and sets on cmd QUEUE FULL or BUSY status as well as
* SCST_CMD_STATE_PRE_XMIT_RESP state. Target drivers and dev handlers are
* required to call this function if they allocate data buffers on their
* own.
*/
int scst_check_mem(struct scst_cmd *cmd);
/*
* Get/put global ref counter that prevents from entering into suspended
* activities stage, so protects from any global management operations.
@@ -2661,12 +2675,14 @@ void sgv_pool_set_allocator(struct sgv_pool *pool,
void (*free_pages_fn)(struct scatterlist *, int, void *));
struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
unsigned long gfp_mask, int atomic, int *count,
struct sgv_pool_obj **sgv, void *priv);
void sgv_pool_free(struct sgv_pool_obj *sgv);
unsigned long gfp_mask, int flags, int *count,
struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv);
void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim);
void *sgv_get_priv(struct sgv_pool_obj *sgv);
void scst_init_mem_lim(struct scst_mem_lim *mem_lim);
/**
** Generic parse() support routines.
** Done via pointer on functions to avoid unneeded dereferences on

View File

@@ -26,7 +26,7 @@
#define DEV_USER_NAME "scst_user"
#define DEV_USER_PATH "/dev/"
#define DEV_USER_VERSION_NAME "0.9.6"
#define DEV_USER_VERSION_NAME "1.0.0-rc1"
#define DEV_USER_VERSION DEV_USER_VERSION_NAME "$Revision$" SCST_CONST_VERSION
/*

View File

@@ -51,7 +51,6 @@ struct scst_user_dev {
/* Protected by dev_rwsem or don't need any protection */
unsigned int blocking:1;
unsigned int cleanup_done:1;
unsigned int cleaning:1;
unsigned int tst:3;
unsigned int queue_alg:4;
unsigned int tas:1;
@@ -64,6 +63,7 @@ struct scst_user_dev {
int block;
int def_block;
struct scst_mem_lim udev_mem_lim;
struct sgv_pool *pool;
uint8_t parse_type;
@@ -84,8 +84,6 @@ struct scst_user_dev {
struct list_head dev_list_entry;
char name[SCST_MAX_NAME];
/* Protected by cleanup_lock */
unsigned char in_cleanup_list:1;
struct list_head cleanup_list_entry;
/* ToDo: make it on-stack */
struct completion cleanup_cmpl;
@@ -160,7 +158,6 @@ static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
unsigned long *flags);
static void dev_user_unjam_dev(struct scst_user_dev *dev);
static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
@@ -245,6 +242,7 @@ static inline void ucmd_get(struct scst_user_cmd *ucmd)
__ucmd_get(ucmd, false);
}
/* Must not be called under cmd_list_lock!! */
static inline void ucmd_put(struct scst_user_cmd *ucmd)
{
TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
@@ -517,7 +515,7 @@ static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
ucmd->buff_cached = cached_buff;
cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
&cmd->sg_cnt, &ucmd->sgv, ucmd);
&cmd->sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
if (cmd->sg != NULL) {
struct scst_user_cmd *buf_ucmd =
(struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
@@ -592,7 +590,7 @@ static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
PRINT_ERROR("Target driver %s requested own memory "
"allocation", ucmd->cmd->tgtt->name);
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
res = SCST_CMD_STATE_PRE_XMIT_RESP;
res = scst_get_cmd_abnormal_done_state(cmd);
goto out;
}
@@ -604,7 +602,7 @@ static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
goto out;
else if (rc < 0) {
scst_set_busy(cmd);
res = SCST_CMD_STATE_PRE_XMIT_RESP;
res = scst_get_cmd_abnormal_done_state(cmd);
goto out;
}
@@ -776,7 +774,7 @@ out_invalid:
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
out_error:
res = SCST_CMD_STATE_PRE_XMIT_RESP;
res = scst_get_cmd_abnormal_done_state(cmd);
goto out;
}
@@ -865,7 +863,7 @@ static int dev_user_exec(struct scst_cmd *cmd)
static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
{
if (ucmd->sgv != NULL) {
sgv_pool_free(ucmd->sgv);
sgv_pool_free(ucmd->sgv, &ucmd->dev->udev_mem_lim);
ucmd->sgv = NULL;
} else if (ucmd->data_pages != NULL) {
/* We mapped pages, but for some reason didn't allocate them */
@@ -1022,19 +1020,6 @@ static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
smp_mb();
if (unlikely(dev->cleaning)) {
spin_lock_irqsave(&cleanup_lock, flags);
if (!dev->in_cleanup_list) {
TRACE_DBG("Adding dev %p to the cleanup list (ucmd %p)",
dev, ucmd);
list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
dev->in_cleanup_list = 1;
wake_up(&cleanup_list_waitQ);
}
spin_unlock_irqrestore(&cleanup_lock, flags);
}
TRACE_EXIT();
return;
}
@@ -1090,7 +1075,7 @@ out_nomem:
/* go through */
out_err:
ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(ucmd->cmd);
goto out;
out_unmap:
@@ -1131,7 +1116,7 @@ static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
} else {
scst_set_busy(ucmd->cmd);
ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(ucmd->cmd);
}
out_process:
@@ -1142,7 +1127,7 @@ out_process:
out_hwerr:
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(ucmd->cmd);
res = -EINVAL;
goto out_process;
}
@@ -1193,7 +1178,7 @@ out_inval:
(long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
res = -EINVAL;
goto out_process;
}
@@ -1857,8 +1842,7 @@ static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
scst_set_cmd_error(ucmd->cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
}
ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(ucmd->cmd);
TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
list_add(&ucmd->cmd->cmd_list_entry,
@@ -1944,43 +1928,42 @@ out:
static void dev_user_unjam_dev(struct scst_user_dev *dev)
{
int i;
unsigned long flags;
struct scst_user_cmd *ucmd;
TRACE_ENTRY();
TRACE_MGMT_DBG("Unjamming dev %p", dev);
spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
repeat:
for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
struct list_head *head = &dev->ucmd_hash[i];
bool repeat = false;
list_for_each_entry(ucmd, head, hash_list_entry) {
if (!ucmd->sent_to_user)
continue;
if (ucmd_get_check(ucmd))
continue;
TRACE_DBG("ucmd %p, state %x, scst_cmd %p",
ucmd, ucmd->state, ucmd->cmd);
TRACE_MGMT_DBG("ucmd %p, state %x, scst_cmd %p", ucmd,
ucmd->state, ucmd->cmd);
if (ucmd->sent_to_user) {
dev_user_unjam_cmd(ucmd, 0, &flags);
repeat = true;
}
dev_user_unjam_cmd(ucmd, 0, NULL);
spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
ucmd_put(ucmd);
spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
if (repeat)
goto repeat;
goto repeat;
}
}
if (dev_user_process_scst_commands(dev) != 0)
goto repeat;
spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
TRACE_EXIT();
return;
@@ -2458,6 +2441,8 @@ static int dev_user_register_dev(struct file *file,
strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
dev->name[sizeof(dev->name)-1] = '\0';
scst_init_mem_lim(&dev->udev_mem_lim);
/*
* We don't use clustered pool, since it implies pages reordering,
* which isn't possible with user space supplied buffers. Although
@@ -2732,14 +2717,7 @@ static int dev_user_release(struct inode *inode, struct file *file)
down_write(&dev->dev_rwsem);
spin_lock_irq(&cleanup_lock);
dev->cleaning = 1;
smp_mb(); /* pair to dev_user_add_to_ready() */
sBUG_ON(dev->in_cleanup_list);
list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
dev->in_cleanup_list = 1;
spin_unlock_irq(&cleanup_lock);
wake_up(&cleanup_list_waitQ);
@@ -2753,14 +2731,6 @@ static int dev_user_release(struct inode *inode, struct file *file)
TRACE_DBG("Unregistering finished (dev %p)", dev);
dev->cleanup_done = 1;
smp_mb(); /* just in case */
spin_lock_irq(&cleanup_lock);
if (!dev->in_cleanup_list) {
list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
dev->in_cleanup_list = 1;
}
spin_unlock_irq(&cleanup_lock);
wake_up(&cleanup_list_waitQ);
wake_up(&dev->cmd_lists.cmd_list_waitQ);
@@ -2780,10 +2750,10 @@ out:
return res;
}
static void dev_user_process_cleanup(struct scst_user_dev *dev)
static int dev_user_process_cleanup(struct scst_user_dev *dev)
{
struct scst_user_cmd *ucmd;
int rc;
int rc, res = 1;
TRACE_ENTRY();
@@ -2795,12 +2765,11 @@ static void dev_user_process_cleanup(struct scst_user_dev *dev)
dev_user_unjam_dev(dev);
spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
smp_mb(); /* just in case, pair for dev_user_release()
* cleanup_done assignment.
*/
rc = dev_user_get_next_cmd(dev, &ucmd);
if (rc == 0)
dev_user_unjam_cmd(ucmd, 1, NULL);
spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
if (rc == -EAGAIN) {
@@ -2832,10 +2801,11 @@ again:
TRACE_DBG("Cleanuping done (dev %p)", dev);
complete_all(&dev->cleanup_cmpl);
res = 0;
out:
TRACE_EXIT();
return;
TRACE_EXIT_RES(res);
return res;
}
static inline int test_cleanup_list(void)
@@ -2847,8 +2817,6 @@ static inline int test_cleanup_list(void)
static int dev_user_cleanup_thread(void *arg)
{
struct scst_user_dev *dev;
TRACE_ENTRY();
PRINT_INFO("Cleanup thread started, PID %d", current->pid);
@@ -2874,16 +2842,45 @@ static int dev_user_cleanup_thread(void *arg)
remove_wait_queue(&cleanup_list_waitQ, &wait);
}
while (!list_empty(&cleanup_list)) {
dev = list_entry(cleanup_list.next, typeof(*dev),
cleanup_list_entry);
list_del(&dev->cleanup_list_entry);
dev->in_cleanup_list = 0;
/*
* We have to poll devices, because commands can go from SCST
* core on cmd_list_waitQ and we have no practical way to
* detect them.
*/
while (1) {
struct scst_user_dev *dev;
LIST_HEAD(cl_devs);
while (!list_empty(&cleanup_list)) {
int rc;
dev = list_entry(cleanup_list.next,
typeof(*dev), cleanup_list_entry);
list_del(&dev->cleanup_list_entry);
spin_unlock_irq(&cleanup_lock);
rc = dev_user_process_cleanup(dev);
spin_lock_irq(&cleanup_lock);
if (rc != 0)
list_add_tail(&dev->cleanup_list_entry,
&cl_devs);
}
if (list_empty(&cl_devs))
break;
spin_unlock_irq(&cleanup_lock);
dev_user_process_cleanup(dev);
msleep(100);
spin_lock_irq(&cleanup_lock);
while (!list_empty(&cl_devs)) {
dev = list_entry(cl_devs.next, typeof(*dev),
cleanup_list_entry);
list_move_tail(&dev->cleanup_list_entry,
&cleanup_list);
}
}
}
spin_unlock_irq(&cleanup_lock);

View File

@@ -186,6 +186,56 @@ void scst_set_busy(struct scst_cmd *cmd)
}
EXPORT_SYMBOL(scst_set_busy);
/*
 * Computes the command state machine state to which @cmd should be
 * routed after it completed abnormally. Commands still in one of the
 * early states (INIT_WAIT, INIT, PRE_PARSE, DEV_PARSE) are sent
 * straight to SCST_CMD_STATE_PRE_XMIT_RESP; commands in any later
 * state go to SCST_CMD_STATE_PRE_DEV_DONE.
 */
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	if ((cmd->state == SCST_CMD_STATE_INIT_WAIT) ||
	    (cmd->state == SCST_CMD_STATE_INIT) ||
	    (cmd->state == SCST_CMD_STATE_PRE_PARSE) ||
	    (cmd->state == SCST_CMD_STATE_DEV_PARSE))
		res = SCST_CMD_STATE_PRE_XMIT_RESP;
	else
		res = SCST_CMD_STATE_PRE_DEV_DONE;

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
/*
 * Sets cmd->state to the state returned by
 * scst_get_cmd_abnormal_done_state(), i.e. routes an abnormally
 * completed command to either PRE_XMIT_RESP or PRE_DEV_DONE.
 */
void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
TRACE_ENTRY();
#ifdef EXTRACHECKS
/*
 * In debug (EXTRACHECKS) builds, BUG if the command is already at or
 * past response transmission -- moving it backwards would be a logic
 * error in the caller.
 */
switch(cmd->state) {
case SCST_CMD_STATE_PRE_XMIT_RESP:
case SCST_CMD_STATE_XMIT_RESP:
case SCST_CMD_STATE_FINISHED:
case SCST_CMD_STATE_XMIT_WAIT:
PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
cmd->state, cmd, cmd->cdb[0]);
sBUG();
}
#endif
cmd->state = scst_get_cmd_abnormal_done_state(cmd);
/*
 * Any state other than PRE_XMIT_RESP (i.e. PRE_DEV_DONE) implies the
 * command already has a tgt_dev attached.
 */
EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
(cmd->tgt_dev == NULL));
TRACE_EXIT();
return;
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
int i, l;
@@ -248,6 +298,7 @@ int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
dev->p_cmd_lists = &scst_main_cmd_lists;
atomic_set(&dev->dev_cmd_count, 0);
atomic_set(&dev->write_cmd_count, 0);
scst_init_mem_lim(&dev->dev_mem_lim);
spin_lock_init(&dev->dev_lock);
atomic_set(&dev->on_dev_count, 0);
INIT_LIST_HEAD(&dev->blocked_cmd_list);
@@ -287,6 +338,14 @@ void scst_free_device(struct scst_device *dev)
return;
}
/*
 * Initializes a per-device memory limit object: no pages accounted
 * yet, and the cap taken from the scst_max_dev_cmd_mem module
 * parameter (in megabytes), converted to pages. The conversion
 * (MB << 10) >> (PAGE_SHIFT - 10) is MB -> KiB -> pages, done in
 * 64 bits so the intermediate value cannot overflow.
 */
void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
atomic_set(&mem_lim->alloced_pages, 0);
mem_lim->max_allowed_pages =
((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
struct scst_device *dev, lun_t lun)
{
@@ -1611,7 +1670,7 @@ int scst_alloc_space(struct scst_cmd *cmd)
}
cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
&cmd->sg_cnt, &cmd->sgv, NULL);
&cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
if (cmd->sg == NULL)
goto out;
@@ -1634,7 +1693,7 @@ out:
return res;
out_sg_free:
sgv_pool_free(cmd->sgv);
sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
cmd->sgv = NULL;
cmd->sg = NULL;
cmd->sg_cnt = 0;
@@ -1653,7 +1712,7 @@ void scst_release_space(struct scst_cmd *cmd)
goto out;
}
sgv_pool_free(cmd->sgv);
sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
cmd->sgv = NULL;
cmd->sg_cnt = 0;

View File

@@ -104,10 +104,6 @@ unsigned long scst_trace_flag;
unsigned long scst_flags;
atomic_t scst_cmd_count;
spinlock_t scst_cmd_mem_lock;
unsigned long scst_cur_cmd_mem, scst_cur_max_cmd_mem;
unsigned long scst_max_cmd_mem;
struct scst_cmd_lists scst_main_cmd_lists;
struct scst_tasklet scst_tasklets[NR_CPUS];
@@ -141,12 +137,19 @@ static int scst_virt_dev_last_id; /* protected by scst_mutex */
spinlock_t scst_temp_UA_lock;
uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
unsigned int scst_max_cmd_mem;
unsigned int scst_max_dev_cmd_mem;
module_param_named(scst_threads, scst_threads, int, 0);
MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, long, 0);
module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
"the SCST commands at any given time in MB");
"all SCSI commands of all devices at any given time in MB");
module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
"by all SCSI commands of a device at any given time in MB");
struct scst_dev_type scst_null_devtype = {
.name = "none",
@@ -1620,7 +1623,6 @@ static int __init init_scst(void)
scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
#endif
atomic_set(&scst_cmd_count, 0);
spin_lock_init(&scst_cmd_mem_lock);
spin_lock_init(&scst_mcmd_lock);
INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
@@ -1712,15 +1714,27 @@ static int __init init_scst(void)
struct sysinfo si;
si_meminfo(&si);
#if BITS_PER_LONG == 32
scst_max_cmd_mem = min(((uint64_t)si.totalram << PAGE_SHIFT) >> 2,
(uint64_t)1 << 30);
scst_max_cmd_mem = min(
(((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
(uint64_t)1 << 30);
#else
scst_max_cmd_mem = (si.totalram << PAGE_SHIFT) >> 2;
scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
#endif
} else
scst_max_cmd_mem <<= 20;
}
res = scst_sgv_pools_init(scst_max_cmd_mem, 0);
if (scst_max_dev_cmd_mem != 0) {
if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
"scst_max_cmd_mem (%d)",
scst_max_dev_cmd_mem,
scst_max_cmd_mem);
scst_max_dev_cmd_mem = scst_max_cmd_mem;
}
} else
scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
res = scst_sgv_pools_init(
((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
if (res != 0)
goto out_destroy_sense_mempool;
@@ -1756,7 +1770,8 @@ static int __init init_scst(void)
PRINT_INFO("SCST version %s loaded successfully (max mem for "
"commands %ld Mb)", SCST_VERSION_STRING, scst_max_cmd_mem >> 20);
"commands %dMB, per device %dMB)", SCST_VERSION_STRING,
scst_max_cmd_mem, scst_max_dev_cmd_mem);
scst_print_config();

View File

@@ -322,7 +322,7 @@ static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
kfree(obj->sg_entries);
}
kmem_cache_free(obj->owner_pool->caches[obj->order], obj);
kmem_cache_free(obj->owner_pool->caches[obj->order_or_pages], obj);
return;
}
@@ -330,22 +330,28 @@ static struct sgv_pool_obj *sgv_pool_cached_get(struct sgv_pool *pool,
int order, unsigned long gfp_mask)
{
struct sgv_pool_obj *obj;
int pages = 1 << order;
spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
if (likely(!list_empty(&pool->recycling_lists[order]))) {
obj = list_entry(pool->recycling_lists[order].next,
struct sgv_pool_obj,
recycle_entry.recycling_list_entry);
list_del(&obj->recycle_entry.sorted_recycling_list_entry);
list_del(&obj->recycle_entry.recycling_list_entry);
sgv_pools_mgr.mgr.thr.inactive_pages_total -= 1 << order;
sgv_pools_mgr.mgr.throttle.inactive_pages_total -= pages;
sgv_pools_mgr.mgr.throttle.active_pages_total += pages;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
EXTRACHECKS_BUG_ON(obj->order != order);
EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
goto out;
}
pool->acc.cached_entries++;
pool->acc.cached_pages += (1 << order);
pool->acc.cached_pages += pages;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
@@ -353,12 +359,12 @@ static struct sgv_pool_obj *sgv_pool_cached_get(struct sgv_pool *pool,
gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
if (likely(obj)) {
memset(obj, 0, sizeof(*obj));
obj->order = order;
obj->order_or_pages = order;
obj->owner_pool = pool;
} else {
spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
pool->acc.cached_entries--;
pool->acc.cached_pages -= (1 << order);
pool->acc.cached_pages -= pages;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
}
@@ -370,12 +376,15 @@ static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
{
struct sgv_pool *owner = sgv->owner_pool;
struct list_head *entry;
struct list_head *list = &owner->recycling_lists[sgv->order];
struct list_head *list = &owner->recycling_lists[sgv->order_or_pages];
int sched = 0;
int pages = 1 << sgv->order_or_pages;
EXTRACHECKS_BUG_ON(sgv->order_or_pages < 0);
spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
TRACE_MEM("sgv %p, order %d, sg_count %d", sgv, sgv->order,
TRACE_MEM("sgv %p, order %d, sg_count %d", sgv, sgv->order_or_pages,
sgv->sg_count);
if (owner->clustered) {
@@ -385,7 +394,7 @@ static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
struct sgv_pool_obj,
recycle_entry.recycling_list_entry);
TRACE_DBG("tmp %p, order %d, sg_count %d", tmp,
tmp->order, tmp->sg_count);
tmp->order_or_pages, tmp->sg_count);
if (sgv->sg_count <= tmp->sg_count)
break;
}
@@ -401,7 +410,9 @@ static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
sgv->recycle_entry.time_stamp = jiffies;
sgv_pools_mgr.mgr.thr.inactive_pages_total += 1 << sgv->order;
sgv_pools_mgr.mgr.throttle.inactive_pages_total += pages;
sgv_pools_mgr.mgr.throttle.active_pages_total -= pages;
if (!sgv_pools_mgr.mgr.pitbool_running) {
sgv_pools_mgr.mgr.pitbool_running = 1;
sched = 1;
@@ -417,13 +428,13 @@ static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
/* Must be called under pool_mgr_lock held */
static void __sgv_pool_cached_purge(struct sgv_pool_obj *e)
{
int pages = 1 << e->order;
int pages = 1 << e->order_or_pages;
list_del(&e->recycle_entry.sorted_recycling_list_entry);
list_del(&e->recycle_entry.recycling_list_entry);
e->owner_pool->acc.cached_entries--;
e->owner_pool->acc.cached_pages -= pages;
sgv_pools_mgr.mgr.thr.inactive_pages_total -= pages;
sgv_pools_mgr.mgr.throttle.inactive_pages_total -= pages;
return;
}
@@ -445,8 +456,8 @@ static int sgv_pool_cached_purge(struct sgv_pool_obj *e, int t,
static int sgv_pool_oom_free_objs(int pgs)
{
TRACE_MEM("Shrinking pools about %d pages", pgs);
while ((sgv_pools_mgr.mgr.thr.inactive_pages_total >
sgv_pools_mgr.mgr.thr.lo_wmk) &&
while ((sgv_pools_mgr.mgr.throttle.inactive_pages_total >
sgv_pools_mgr.mgr.throttle.lo_wmk) &&
(pgs > 0)) {
struct sgv_pool_obj *e;
@@ -457,7 +468,7 @@ static int sgv_pool_oom_free_objs(int pgs)
recycle_entry.sorted_recycling_list_entry);
__sgv_pool_cached_purge(e);
pgs -= 1 << e->order;
pgs -= 1 << e->order_or_pages;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
sgv_dtor_and_free(e);
@@ -468,22 +479,19 @@ static int sgv_pool_oom_free_objs(int pgs)
return pgs;
}
static int sgv_pool_hiwmk_check(int pages_to_alloc, int no_fail)
static int sgv_pool_hiwmk_check(int pages_to_alloc)
{
int res = 0;
int pages = pages_to_alloc;
if (unlikely(no_fail))
goto out;
spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
pages += atomic_read(&sgv_pools_mgr.mgr.thr.active_pages_total);
pages += sgv_pools_mgr.mgr.thr.inactive_pages_total;
pages += sgv_pools_mgr.mgr.throttle.active_pages_total;
pages += sgv_pools_mgr.mgr.throttle.inactive_pages_total;
if (unlikely((u32)pages > sgv_pools_mgr.mgr.thr.hi_wmk)) {
pages -= sgv_pools_mgr.mgr.thr.hi_wmk;
sgv_pools_mgr.mgr.thr.releases_on_hiwmk++;
if (unlikely((u32)pages > sgv_pools_mgr.mgr.throttle.hi_wmk)) {
pages -= sgv_pools_mgr.mgr.throttle.hi_wmk;
sgv_pools_mgr.mgr.throttle.releases_on_hiwmk++;
pages = sgv_pool_oom_free_objs(pages);
if (pages > 0) {
@@ -491,26 +499,70 @@ static int sgv_pool_hiwmk_check(int pages_to_alloc, int no_fail)
"memory (%d pages) for being executed "
"commands together with the already "
"allocated memory exceeds the allowed "
"maximum %dMB. Should you increase "
"maximum %d. Should you increase "
"scst_max_cmd_mem?", pages_to_alloc,
sgv_pools_mgr.mgr.thr.hi_wmk >>
(20-PAGE_SHIFT));
sgv_pools_mgr.mgr.thr.releases_failed++;
sgv_pools_mgr.mgr.throttle.hi_wmk);
sgv_pools_mgr.mgr.throttle.releases_failed++;
res = -ENOMEM;
goto out_unlock;
}
}
out_unlock:
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
sgv_pools_mgr.mgr.throttle.active_pages_total += pages_to_alloc;
out:
out_unlock:
TRACE_MEM("pages_to_alloc %d, new active %d", pages_to_alloc,
sgv_pools_mgr.mgr.throttle.active_pages_total);
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
return res;
}
/*
 * Returns @pages to the global throttle's active-pages counter,
 * undoing the accounting done by sgv_pool_hiwmk_check() when an
 * allocation is aborted or freed. Counter is protected by
 * pool_mgr_lock (bottom-half safe).
 */
static void sgv_pool_hiwmk_uncheck(int pages)
{
spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
sgv_pools_mgr.mgr.throttle.active_pages_total -= pages;
TRACE_MEM("pages %d, new active %d", pages,
sgv_pools_mgr.mgr.throttle.active_pages_total);
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
return;
}
/*
 * Atomically accounts @pages against the per-device command memory
 * limit @mem_lim. Returns true if the new total fits under
 * mem_lim->max_allowed_pages; on overshoot it logs, rolls the
 * accounting back and returns false, leaving alloced_pages unchanged.
 */
static bool scst_check_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
{
int alloced;
bool res = true;
/*
 * Optimistically add first, undo on overshoot: keeps the common
 * (success) path a single atomic read-modify-write.
 *
 * NOTE(review): alloced (int) is compared against the unsigned
 * max_allowed_pages; safe while both values stay non-negative --
 * confirm alloced_pages can never go negative here.
 */
alloced = atomic_add_return(pages, &mem_lim->alloced_pages);
if (unlikely(alloced > mem_lim->max_allowed_pages)) {
TRACE(TRACE_OUT_OF_MEM, "Requested amount of memory "
"(%d pages) for being executed commands on a device "
"together with the already allocated memory exceeds "
"the allowed maximum %d. Should you increase "
"scst_max_dev_cmd_mem?", pages,
mem_lim->max_allowed_pages);
/* Roll back the speculative accounting done above */
atomic_sub(pages, &mem_lim->alloced_pages);
res = false;
}
TRACE_MEM("mem_lim %p, pages %d, res %d, new alloced %d", mem_lim,
pages, res, atomic_read(&mem_lim->alloced_pages));
return res;
}
/*
 * Releases @pages from the per-device accounting taken by a prior
 * successful scst_check_allowed_mem() call.
 */
static void scst_uncheck_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
{
atomic_sub(pages, &mem_lim->alloced_pages);
TRACE_MEM("mem_lim %p, pages %d, new alloced %d", mem_lim,
pages, atomic_read(&mem_lim->alloced_pages));
return;
}
struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
unsigned long gfp_mask, int flags, int *count,
struct sgv_pool_obj **sgv, void *priv)
struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
{
struct sgv_pool_obj *obj;
int order, pages, cnt;
@@ -518,13 +570,15 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
int pages_to_alloc;
struct kmem_cache *cache;
int no_cached = flags & SCST_POOL_ALLOC_NO_CACHED;
bool no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
bool allowed_mem_checked = false, hiwmk_checked = false;
TRACE_ENTRY();
if (unlikely(size == 0))
goto out;
sBUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
order = get_order(size);
@@ -533,34 +587,51 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
if (*sgv != NULL) {
obj = *sgv;
TRACE_MEM("Supplied sgv_obj %p, sgv_order %d", obj, obj->order);
EXTRACHECKS_BUG_ON(obj->order != order);
EXTRACHECKS_BUG_ON(obj->sg_count != 0);
pages_to_alloc = (1 << order);
cache = pool->caches[obj->order];
if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
cache = pool->caches[obj->order_or_pages];
TRACE_MEM("Supplied sgv_obj %p, sgv_order %d", obj,
obj->order_or_pages);
EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
EXTRACHECKS_BUG_ON(obj->sg_count != 0);
if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
goto out_fail_free_sg_entries;
allowed_mem_checked = true;
if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
goto out_fail_free_sg_entries;
hiwmk_checked = true;
} else if ((order < SGV_POOL_ELEMENTS) && !no_cached) {
pages_to_alloc = (1 << order);
cache = pool->caches[order];
if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
goto out_fail;
allowed_mem_checked = true;
obj = sgv_pool_cached_get(pool, order, gfp_mask);
if (unlikely(obj == NULL)) {
TRACE(TRACE_OUT_OF_MEM, "Allocation of "
"sgv_pool_obj failed (size %d)", size);
goto out_fail;
}
if (obj->sg_count != 0) {
TRACE_MEM("Cached sgv_obj %p", obj);
EXTRACHECKS_BUG_ON(obj->order != order);
EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
atomic_inc(&pool->cache_acc[order].hit_alloc);
goto success;
}
pages_to_alloc = (1 << order);
if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) {
if (!(flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
goto out_fail_free;
}
TRACE_MEM("Brand new sgv_obj %p", obj);
if (order <= sgv_pools_mgr.sgv_max_local_order) {
obj->sg_entries = obj->sg_entries_data;
sg_init_table(obj->sg_entries, pages_to_alloc);
@@ -585,15 +656,25 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
goto out_return;
obj->allocator_priv = priv;
if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
goto out_fail_free_sg_entries;
hiwmk_checked = true;
} else {
int sz;
pages_to_alloc = pages;
if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
goto out_fail;
allowed_mem_checked = true;
if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS)
goto out_return2;
cache = NULL;
sz = sizeof(*obj) + pages*sizeof(obj->sg_entries[0]);
obj = kmalloc(sz, gfp_mask);
if (unlikely(obj == NULL)) {
TRACE(TRACE_OUT_OF_MEM, "Allocation of "
@@ -601,15 +682,18 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
goto out_fail;
}
memset(obj, 0, sizeof(*obj));
obj->owner_pool = pool;
obj->order = -1 - order;
obj->order_or_pages = -pages_to_alloc;
obj->allocator_priv = priv;
obj->sg_entries = obj->sg_entries_data;
sg_init_table(obj->sg_entries, pages);
if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
goto out_fail_free_sg_entries;
hiwmk_checked = true;
TRACE_MEM("Big or no_cached sgv_obj %p (size %d)", obj, sz);
}
@@ -642,8 +726,6 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
}
success:
atomic_add(1 << order, &sgv_pools_mgr.mgr.thr.active_pages_total);
if (cache) {
int sg;
atomic_inc(&pool->cache_acc[order].total_alloc);
@@ -692,7 +774,7 @@ out_return1:
out_return2:
*count = pages_to_alloc;
res = NULL;
goto out;
goto out_uncheck;
out_fail_free_sg_entries:
if (obj->sg_entries != obj->sg_entries_data) {
@@ -716,6 +798,12 @@ out_fail:
*count = 0;
*sgv = NULL;
TRACE_MEM("%s", "Allocation failed");
out_uncheck:
if (hiwmk_checked)
sgv_pool_hiwmk_uncheck(pages_to_alloc);
if (allowed_mem_checked)
scst_uncheck_allowed_mem(mem_lim, pages_to_alloc);
goto out;
}
EXPORT_SYMBOL(sgv_pool_alloc);
@@ -726,27 +814,28 @@ void *sgv_get_priv(struct sgv_pool_obj *sgv)
}
EXPORT_SYMBOL(sgv_get_priv);
void sgv_pool_free(struct sgv_pool_obj *sgv)
void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim)
{
int order = sgv->order, pages;
int pages;
TRACE_MEM("Freeing sgv_obj %p, order %d, sg_entries %p, "
"sg_count %d, allocator_priv %p", sgv, order,
"sg_count %d, allocator_priv %p", sgv, sgv->order_or_pages,
sgv->sg_entries, sgv->sg_count, sgv->allocator_priv);
if (order >= 0) {
sgv->sg_entries[sgv->orig_sg].length = sgv->orig_length;
pages = (sgv->sg_count) ? 1 << order : 0;
if (sgv->order_or_pages >= 0) {
sgv->sg_entries[sgv->orig_sg].length = sgv->orig_length;
pages = (sgv->sg_count != 0) ? 1 << sgv->order_or_pages : 0;
sgv_pool_cached_put(sgv);
} else {
sgv->owner_pool->alloc_fns.free_pages_fn(sgv->sg_entries,
sgv->sg_count, sgv->allocator_priv);
pages = (sgv->sg_count) ? 1 << (-order - 1) : 0;
pages = (sgv->sg_count != 0) ? -sgv->order_or_pages : 0;
kfree(sgv);
sgv_pool_hiwmk_uncheck(pages);
}
atomic_sub(pages, &sgv_pools_mgr.mgr.thr.active_pages_total);
scst_uncheck_allowed_mem(mem_lim, pages);
return;
}
EXPORT_SYMBOL(sgv_pool_free);
@@ -763,14 +852,19 @@ struct scatterlist *scst_alloc(int size, unsigned long gfp_mask, int *count)
atomic_inc(&sgv_pools_mgr.sgv_other_total_alloc);
if (sgv_pool_hiwmk_check(pages, no_fail) != 0) {
res = NULL;
goto out;
if (unlikely(!no_fail)) {
if (unlikely(sgv_pool_hiwmk_check(pages) != 0)) {
res = NULL;
goto out;
}
}
res = kmalloc(pages*sizeof(*res), gfp_mask);
if (res == NULL)
goto out;
if (res == NULL) {
TRACE(TRACE_OUT_OF_MEM, "Unable to allocate sg for %d pages",
pages);
goto out_uncheck;
}
sg_init_table(res, pages);
@@ -784,8 +878,6 @@ struct scatterlist *scst_alloc(int size, unsigned long gfp_mask, int *count)
if (*count <= 0)
goto out_free;
atomic_add(pages, &sgv_pools_mgr.mgr.thr.active_pages_total);
out:
TRACE_MEM("Alloced sg %p (count %d)", res, *count);
@@ -795,6 +887,10 @@ out:
out_free:
kfree(res);
res = NULL;
out_uncheck:
if (!no_fail)
sgv_pool_hiwmk_uncheck(pages);
goto out;
}
EXPORT_SYMBOL(scst_alloc);
@@ -803,7 +899,7 @@ void scst_free(struct scatterlist *sg, int count)
{
TRACE_MEM("Freeing sg=%p", sg);
atomic_sub(count, &sgv_pools_mgr.mgr.thr.active_pages_total);
sgv_pool_hiwmk_uncheck(count);
scst_free_sys_sg_entries(sg, count, NULL);
kfree(sg);
@@ -1040,7 +1136,7 @@ static int sgv_pool_cached_shrinker(int nr, gfp_t gfpm)
recycle_entry.sorted_recycling_list_entry);
if (sgv_pool_cached_purge(e, SHRINK_TIME_AFTER, rt) == 0) {
nr -= 1 << e->order;
nr -= 1 << e->order_or_pages;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
sgv_dtor_and_free(e);
spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
@@ -1052,7 +1148,7 @@ static int sgv_pool_cached_shrinker(int nr, gfp_t gfpm)
}
}
nr = sgv_pools_mgr.mgr.thr.inactive_pages_total;
nr = sgv_pools_mgr.mgr.throttle.inactive_pages_total;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
@@ -1085,7 +1181,7 @@ static void sgv_pool_cached_pitbool(void *p)
break;
}
total_pages = sgv_pools_mgr.mgr.thr.inactive_pages_total;
total_pages = sgv_pools_mgr.mgr.throttle.inactive_pages_total;
spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
@@ -1098,6 +1194,7 @@ static void sgv_pool_cached_pitbool(void *p)
return;
}
/* Both parameters in pages */
int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
{
int res;
@@ -1107,10 +1204,8 @@ int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
memset(pools, 0, sizeof(*pools));
atomic_set(&sgv_pools_mgr.mgr.thr.active_pages_total, 0);
sgv_pools_mgr.mgr.thr.hi_wmk = mem_hwmark >> PAGE_SHIFT;
sgv_pools_mgr.mgr.thr.lo_wmk = mem_lwmark >> PAGE_SHIFT;
sgv_pools_mgr.mgr.throttle.hi_wmk = mem_hwmark;
sgv_pools_mgr.mgr.throttle.lo_wmk = mem_lwmark;
sgv_pool_evaluate_local_order(&sgv_pools_mgr);
@@ -1255,12 +1350,12 @@ int sgv_pool_procinfo_show(struct seq_file *seq, void *v)
seq_printf(seq, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n\n",
"Inactive/active pages",
sgv_pools_mgr.mgr.thr.inactive_pages_total,
atomic_read(&sgv_pools_mgr.mgr.thr.active_pages_total),
"Hi/lo watermarks [pages]", sgv_pools_mgr.mgr.thr.hi_wmk,
sgv_pools_mgr.mgr.thr.lo_wmk, "Hi watermark releases/failures",
sgv_pools_mgr.mgr.thr.releases_on_hiwmk,
sgv_pools_mgr.mgr.thr.releases_failed);
sgv_pools_mgr.mgr.throttle.inactive_pages_total,
sgv_pools_mgr.mgr.throttle.active_pages_total,
"Hi/lo watermarks [pages]", sgv_pools_mgr.mgr.throttle.hi_wmk,
sgv_pools_mgr.mgr.throttle.lo_wmk, "Hi watermark releases/failures",
sgv_pools_mgr.mgr.throttle.releases_on_hiwmk,
sgv_pools_mgr.mgr.throttle.releases_failed);
seq_printf(seq, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
"% merged", "Cached");

View File

@@ -33,7 +33,8 @@ struct trans_tbl_ent {
};
struct sgv_pool_obj {
int order;
/* if <0 - pages, >0 - order */
int order_or_pages;
struct {
unsigned long time_stamp; /* jiffies, protected by pool_mgr_lock */
@@ -102,14 +103,14 @@ struct scst_sgv_pools_manager {
struct sgv_mem_throttling {
u32 inactive_pages_total;
atomic_t active_pages_total;
u32 active_pages_total;
u32 hi_wmk; /* compared against inactive_pages_total + active_pages_total */
u32 lo_wmk; /* compared against inactive_pages_total only */
u32 releases_on_hiwmk;
u32 releases_failed;
} thr; /* protected by pool_mgr_lock */
} throttle; /* protected by pool_mgr_lock */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
struct shrinker *sgv_shrinker;

View File

@@ -138,7 +138,8 @@ static inline bool scst_is_context_gfp_atomic(void)
return irqs_disabled() || in_atomic() || in_interrupt();
}
extern unsigned long scst_max_cmd_mem;
extern unsigned int scst_max_cmd_mem;
extern unsigned int scst_max_dev_cmd_mem;
extern mempool_t *scst_mgmt_mempool;
extern mempool_t *scst_mgmt_stub_mempool;
@@ -273,7 +274,7 @@ int scst_add_dev_threads(struct scst_device *dev, int num);
void scst_del_dev_threads(struct scst_device *dev, int num);
int scst_alloc_device(int gfp_mask, struct scst_device **out_dev);
void scst_free_device(struct scst_device *tgt_dev);
void scst_free_device(struct scst_device *dev);
struct scst_acg *scst_alloc_add_acg(const char *acg_name);
int scst_destroy_acg(struct scst_acg *acg);

View File

@@ -149,7 +149,7 @@ out_redirect:
*/
sBUG_ON(context != SCST_CONTEXT_DIRECT);
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
/* Keep initiator away from too many BUSY commands */
if (!in_interrupt() && !in_atomic())
msleep(50);
@@ -239,7 +239,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
case SCST_SESS_IPH_FAILED:
spin_unlock_irqrestore(&sess->sess_list_lock, flags);
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
goto active;
default:
sBUG();
@@ -253,7 +253,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_lun_not_supported));
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
goto active;
}
@@ -261,7 +261,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_invalid_opcode));
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
goto active;
}
@@ -269,7 +269,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_invalid_message));
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
goto active;
}
@@ -393,10 +393,10 @@ static int scst_pre_parse(struct scst_cmd *cmd)
/*
* Command data length can't be easily
* determined from the CDB. ToDo, all such
* commands should be fixed. Until they are
* fixed, get it from the supplied expected
* value, but limit it to some reasonable
* value (15MB).
* commands processing should be fixed. Until
* it's done, get the length from the supplied
* expected value, but limit it to some
* reasonable value (15MB).
*/
cmd->bufflen = min(cmd->expected_transfer_len,
15*1024*1024);
@@ -429,7 +429,7 @@ out:
return res;
out_xmit:
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
@@ -684,7 +684,7 @@ prep_done:
if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
"cmd %p", cmd);
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
@@ -717,13 +717,13 @@ out_no_space:
TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
out_error:
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
@@ -775,7 +775,7 @@ void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
break;
case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
case SCST_PREPROCESS_STATUS_ERROR_FATAL:
@@ -784,13 +784,13 @@ void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
case SCST_PREPROCESS_STATUS_ERROR:
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
default:
PRINT_ERROR("%s() received unknown status %x", __func__,
status);
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
}
@@ -915,7 +915,7 @@ out_error_rc:
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
out_dev_done:
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
@@ -1002,7 +1002,7 @@ void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
break;
case SCST_RX_STATUS_ERROR_SENSE_SET:
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
case SCST_RX_STATUS_ERROR_FATAL:
@@ -1011,13 +1011,13 @@ void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
case SCST_RX_STATUS_ERROR:
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
default:
PRINT_ERROR("scst_rx_data() received unknown status %x",
status);
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
}
@@ -1046,7 +1046,7 @@ static int scst_tgt_pre_exec(struct scst_cmd *cmd)
if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
switch (rc) {
case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
case SCST_PREPROCESS_STATUS_ERROR_FATAL:
set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
@@ -1054,7 +1054,7 @@ static int scst_tgt_pre_exec(struct scst_cmd *cmd)
case SCST_PREPROCESS_STATUS_ERROR:
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
break;
case SCST_PREPROCESS_STATUS_NEED_THREAD:
TRACE_DBG("Target driver's %s pre_exec() requested "
@@ -1243,6 +1243,7 @@ static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
}
#endif
cmd->state = next_state;
#ifdef EXTRACHECKS
if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
@@ -1252,11 +1253,9 @@ static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
"state %d (opcode %d)", next_state, cmd->cdb[0]);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
next_state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
}
#endif
cmd->state = next_state;
context = scst_optimize_post_exec_context(cmd, scst_get_context());
if (cmd->context_processable)
context |= SCST_CONTEXT_PROCESSABLE;
@@ -1960,7 +1959,7 @@ static int scst_send_to_midlev(struct scst_cmd **active_cmd)
cmd,
(long long unsigned)cmd->tag);
tgt_dev->def_cmd_count--;
cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
} else {
TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
@@ -2472,7 +2471,7 @@ static int scst_dev_done(struct scst_cmd *cmd)
}
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
res = SCST_CMD_STATE_RES_CONT_SAME;
break;
}
@@ -2957,7 +2956,7 @@ static int __scst_init_cmd(struct scst_cmd *cmd)
TRACE_DBG("Finishing cmd %p", cmd);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_lun_not_supported));
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
} else
goto out;
@@ -2967,7 +2966,7 @@ out:
out_busy:
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
goto out;
}
@@ -3003,7 +3002,7 @@ restart:
} else {
TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
cmd, (long long unsigned int)cmd->tag);
cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
scst_set_cmd_abnormal_done_state(cmd);
}
/*
@@ -3676,9 +3675,11 @@ void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
* after this TM command completed.
*/
TRACE_MGMT_DBG("cmd %p (tag %llu) being executed/xmitted "
"(state %d, proc time %ld sec.), deferring ABORT...",
cmd, (long long unsigned int)cmd->tag, cmd->state,
(long)(jiffies - cmd->start_time)/HZ);
"(state %d, op %x, proc time %ld sec., timeout %d "
"sec.), deferring ABORT...", cmd,
(long long unsigned int)cmd->tag, cmd->state,
cmd->cdb[0], (long)(jiffies - cmd->start_time) / HZ,
cmd->timeout / HZ);
mcmd->cmd_finish_wait_count++;

View File

@@ -1,8 +1,8 @@
User space FILEIO handler
=========================
Version 0.9.6, XX XXX 200X
--------------------------
Version 1.0.0, XX June 2008
---------------------------
User space program fileio_tgt uses interface of SCST's scst_user dev
handler as well as allows to see how it works in various modes.

View File

@@ -66,7 +66,7 @@ unsigned long trace_flag = DEFAULT_LOG_FLAGS;
#endif /* defined(DEBUG) || defined(TRACING) */
#define DEF_BLOCK_SHIFT 9
#define VERSION_STR "0.9.6"
#define VERSION_STR "1.0.0"
#define THREADS 7
static struct option const long_options[] =