Various changes and fixes

git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@56 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2006-12-14 16:22:29 +00:00
parent d01eacc4d2
commit f375c2e756
13 changed files with 734 additions and 421 deletions

View File

@@ -126,8 +126,8 @@ mpt_proc_read(char *buffer, char **start, off_t offset, int length, int *eof,
TRACE_DBG("res %d, buffer %p, length %d, %d, priv %p, tgt %p",
res, buffer, length, len, priv, tgt);
BUG_ON(tgt == NULL);
BUG_ON(ioc == NULL);
sBUG_ON(tgt == NULL);
sBUG_ON(ioc == NULL);
len = snprintf(buffer, length,
"ProductID :0x%04x (%s)\n"
@@ -318,7 +318,7 @@ mpt_msg_frame_alloc(MPT_ADAPTER *ioc, int index)
mf = mpt_get_msg_frame(stm_context, _IOC_ID);
if (mf == NULL) {
BUG_ON(1);
sBUG_ON(1);
}
if (index != -1) {
@@ -725,7 +725,7 @@ stm_tgt_reply(MPT_ADAPTER *ioc, u32 reply_word)
TRACE_DBG("Data received, context %x, rx_status %d",
context, rx_status);
BUG_ON(!(*io_state & IO_STATE_DATA_SENT));
sBUG_ON(!(*io_state & IO_STATE_DATA_SENT));
mpt_msg_frame_free(priv, index);
if (*io_state & IO_STATE_DATA_SENT) {
*io_state &= ~IO_STATE_DATA_SENT;
@@ -1092,7 +1092,7 @@ mpt_set_sense_info(MPT_STM_PRIV *priv, CMD *cmd, int len, u8 *sense_buf)
}
}
BUG_ON(info == NULL);
sBUG_ON(info == NULL);
memcpy(info, sense_buf, len);
/*out:*/

View File

@@ -43,14 +43,6 @@
#include "qla2x00t.h"
/* Undefine the initiator driver's own DEBUG symbol */
#undef DEBUG
#ifdef DEBUG_TGT
#define DEBUG
#endif
#include <scst_debug.h>
#include <scst_debug.c>
#ifndef FC_TARGET_SUPPORT
@@ -282,9 +274,9 @@ static void q2t_free_session_done(struct scst_session *scst_sess)
TRACE_ENTRY();
BUG_ON(scst_sess == NULL);
sBUG_ON(scst_sess == NULL);
sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
BUG_ON(sess == NULL);
sBUG_ON(sess == NULL);
tgt = sess->tgt;
kfree(sess);
@@ -406,7 +398,7 @@ static int q2t_pci_map_calc_cnt(struct q2t_prm *prm)
{
int res = 0;
BUG_ON(prm->sg_cnt == 0);
sBUG_ON(prm->sg_cnt == 0);
/* 32 bit S/G Data Transfer */
prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, prm->sg, prm->sg_cnt,
@@ -1123,7 +1115,7 @@ out_free:
static void q2t_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
{
TRACE_ENTRY();
BUG_ON(ha == NULL);
sBUG_ON(ha == NULL);
if (ha->tgt != NULL) {
q2t_do_ctio_completion(ha, handle,
@@ -1716,7 +1708,7 @@ static void q2t_response_pkt(scsi_qla_host_t *ha, sts_entry_t *pkt)
TRACE_ENTRY();
BUG_ON((ha == NULL) || (pkt == NULL));
sBUG_ON((ha == NULL) || (pkt == NULL));
TRACE(TRACE_SCSI, "pkt %p: T %02x C %02x S %02x handle %#x",
pkt, pkt->entry_type, pkt->entry_count, pkt->entry_status,
@@ -1902,7 +1894,7 @@ static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha, uint16_t *mailbo
{
TRACE_ENTRY();
BUG_ON(ha == NULL);
sBUG_ON(ha == NULL);
if (ha->tgt == NULL) {
TRACE(TRACE_DEBUG|TRACE_MGMT,
@@ -1963,7 +1955,7 @@ static void q2t_host_action(scsi_qla_host_t *ha,
TRACE_ENTRY();
BUG_ON(ha == NULL);
sBUG_ON(ha == NULL);
switch (action) {
case ENABLE_TARGET_MODE :

View File

@@ -28,6 +28,15 @@
#include <qla_def.h>
#include "qla2x_tgt_def.h"
/* Undefine the initiator driver's own DEBUG symbol */
#undef DEBUG
#ifdef DEBUG_TGT
#define DEBUG
#endif
#include <scst_debug.h>
/* Version numbers, the same as for the kernel */
/*
 * Pack a 4-component version number, 8 bits per component (shifts are
 * octal: 030/020/010 = 24/16/8). NOTE: '+' binds tighter than '<<' in C,
 * so each shift must be fully parenthesized; the unparenthesized form
 * "(c) << 010 + (d)" would shift c by (8 + d) instead of adding d.
 */
#define Q2T_VERSION(a,b,c,d) (((a) << 030) + ((b) << 020) + ((c) << 010) + (d))
#define Q2T_VERSION_CODE Q2T_VERSION(0,9,6,0)
@@ -157,7 +166,7 @@ static inline struct q2t_sess *q2t_find_sess_by_lid(struct q2t_tgt *tgt,
uint16_t lid)
{
struct q2t_sess *sess, *sess_tmp;
BUG_ON(tgt == NULL);
sBUG_ON(tgt == NULL);
list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list, list) {
if (lid == (sess->loop_id))
return sess;

View File

@@ -56,26 +56,29 @@
/* Allocation of the cmd's data buffer */
#define SCST_CMD_STATE_PREPARE_SPACE 5
/* Allocation of the cmd's data buffer */
#define SCST_CMD_STATE_PREPROCESS_DONE 6
/* Target driver's rdy_to_xfer() is going to be called */
#define SCST_CMD_STATE_RDY_TO_XFER 6
#define SCST_CMD_STATE_RDY_TO_XFER 7
/* Waiting for data from the initiator (until scst_rx_data() called) */
#define SCST_CMD_STATE_DATA_WAIT 7
#define SCST_CMD_STATE_DATA_WAIT 8
/* CDB is going to be sent to SCSI mid-level for execution */
#define SCST_CMD_STATE_SEND_TO_MIDLEV 8
#define SCST_CMD_STATE_SEND_TO_MIDLEV 9
/* Waiting for CDB's execution finish */
#define SCST_CMD_STATE_EXECUTING 9
#define SCST_CMD_STATE_EXECUTING 10
/* Dev handler's dev_done() is going to be called */
#define SCST_CMD_STATE_DEV_DONE 10
#define SCST_CMD_STATE_DEV_DONE 11
/* Target driver's xmit_response() is going to be called */
#define SCST_CMD_STATE_XMIT_RESP 11
#define SCST_CMD_STATE_XMIT_RESP 12
/* Waiting for response's transmission finish */
#define SCST_CMD_STATE_XMIT_WAIT 12
#define SCST_CMD_STATE_XMIT_WAIT 13
/* The cmd finished */
#define SCST_CMD_STATE_FINISHED 14
@@ -156,23 +159,48 @@
#define SCST_RX_STATUS_SUCCESS 0
/*
* Data receiving finished with error, so set the sense and
* Data receiving finished with error, so set the sense and
* finish the command, including xmit_response() call
*/
#define SCST_RX_STATUS_ERROR 1
/*
* Data receiving finished with error and the sense is set,
* Data receiving finished with error and the sense is set,
* so finish the command, including xmit_response() call
*/
#define SCST_RX_STATUS_ERROR_SENSE_SET 2
/*
* Data receiving finished with fatal error, so finish the command,
* Data receiving finished with fatal error, so finish the command,
* but don't call xmit_response()
*/
#define SCST_RX_STATUS_ERROR_FATAL 3
/*************************************************************
** Values for status parameter of scst_restart_cmd()
*************************************************************/
/* Success */
#define SCST_PREPROCESS_STATUS_SUCCESS 0
/*
* Command's processing finished with error, so set the sense and
* finish the command, including xmit_response() call
*/
#define SCST_PREPROCESS_STATUS_ERROR 1
/*
* Command's processing finished with error and the sense is set,
* so finish the command, including xmit_response() call
*/
#define SCST_PREPROCESS_STATUS_ERROR_SENSE_SET 2
/*
* Command's processing finished with fatal error, so finish the command,
* but don't call xmit_response()
*/
#define SCST_PREPROCESS_STATUS_ERROR_FATAL 3
/*************************************************************
** Allowed return codes for xmit_response(), rdy_to_xfer(),
** report_aen()
@@ -213,19 +241,6 @@
*/
#define SCST_EXEC_NEED_THREAD 2
/*************************************************************
** Allowed return codes for dev handler's task_mgmt_fn()
*************************************************************/
/* The command is done with success, no further actions required */
#define SCST_DEV_TM_COMPLETED_SUCCESS 0
/* The command is failed, no further actions required */
#define SCST_DEV_TM_COMPLETED_FAILED -1
/* Regular standard actions for the command should be done */
#define SCST_DEV_TM_NOT_COMPLETED 1
/*************************************************************
** Default timeout for cmd's CDB execution
** by SCSI mid-level (cmd's "timeout" field).
@@ -286,10 +301,21 @@
#define SCST_ABORT_ALL_TASKS 9
/*************************************************************
** Values for mgmt cmd's "status"
** Values for mgmt cmd's status field. Codes taken from iSCSI
*************************************************************/
#define SCST_MGMT_STATUS_SUCCESS 0
#define SCST_MGMT_STATUS_FAILED -1
#define SCST_MGMT_STATUS_SUCCESS 0
#define SCST_MGMT_STATUS_TASK_NOT_EXIST -1
#define SCST_MGMT_STATUS_LUN_NOT_EXIST -2
#define SCST_MGMT_STATUS_FN_NOT_SUPPORTED -5
#define SCST_MGMT_STATUS_REJECTED -255
#define SCST_MGMT_STATUS_FAILED -129
/*************************************************************
** Additional return code for dev handler's task_mgmt_fn()
*************************************************************/
/* Regular standard actions for the command should be done */
#define SCST_DEV_TM_NOT_COMPLETED 1
/*************************************************************
** Session initialization phases
@@ -477,12 +503,6 @@ struct scst_tgt_template
{
/* public: */
/*
* Name of the template. Must be unique to identify
* the template. MUST HAVE
*/
const char name[15];
/*
* SG tablesize allows to check whether scatter/gather can be used
* or not.
@@ -495,17 +515,23 @@ struct scst_tgt_template
unsigned unchecked_isa_dma:1;
/*
* True, if this target adapter wants to use clustering
* (i.e. smaller number of segments).
* True, if this target adapter can benefit from using SG-vector
* clustering (i.e. smaller number of segments).
*/
unsigned use_clustering:1;
/*
* True, if this target adapter doesn't support SG-vector clustering
*/
unsigned no_clustering:1;
/*
* True, if corresponding function supports execution in
* the atomic (non-sleeping) context
*/
unsigned xmit_response_atomic:1;
unsigned rdy_to_xfer_atomic:1;
unsigned preprocessing_done_atomic:1;
/* True, if the template doesn't need the entry in /proc */
unsigned no_proc_entry:1;
@@ -520,6 +546,120 @@ struct scst_tgt_template
*/
unsigned tm_sync_reply:1;
/*
* This function is equivalent to the SCSI
* queuecommand. The target should transmit the response
* buffer and the status in the scst_cmd struct.
* The expectation is that executing this command is NON-BLOCKING.
*
* After the response is actually transmitted, the target
* should call the scst_tgt_cmd_done() function of the
* mid-level, which will allow it to free up the command.
* Returns one of the SCST_TGT_RES_* constants.
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* MUST HAVE
*/
int (*xmit_response) (struct scst_cmd *cmd);
/*
* This function informs the driver that data
* buffer corresponding to the said command have now been
* allocated and it is OK to receive data for this command.
* This function is necessary because a SCSI target does not
* have any control over the commands it receives. Most lower
* level protocols have a corresponding function which informs
* the initiator that buffers have been allocated e.g., XFER_
* RDY in Fibre Channel. After the data is actually received
* the low-level driver needs to call scst_rx_data() in order to
* continue processing this command.
* Returns one of the SCST_TGT_RES_* constants.
* This command is expected to be NON-BLOCKING.
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* OPTIONAL
*/
int (*rdy_to_xfer) (struct scst_cmd *cmd);
/*
* Called to notify the driver that the command is about to be freed.
* Necessary, because for aborted commands xmit_response() could not
* be called. Could be called on IRQ context.
*
* OPTIONAL
*/
void (*on_free_cmd) (struct scst_cmd *cmd);
/*
* This function allows the target driver to handle data buffer
* allocations on its own.
* Shall return 0 in case of success or < 0 (preferably -ENOMEM)
* in case of error, or > 0 if the regular SCST allocation should be
* done. In case of returning successfully, scst_cmd->data_buf_alloced
* will be set by SCST.
*
* If allocation in atomic context - cf. scst_cmd_atomic() - is not
* desired or fails and consequently < 0 is returned, this function
* will be re-called in thread context.
*
* Please note that the driver will have to handle all relevant details
* such as scatterlist setup, highmem, freeing the allocated memory, ...
* itself.
*
* OPTIONAL.
*/
int (*alloc_data_buf) (struct scst_cmd *cmd);
/*
* This function informs the driver that data
* buffer corresponding to the said command have now been
* allocated and other preprocessing tasks have been done.
* A target driver could need to do some actions at this stage.
* After the target driver done the needed actions, it shall call
* scst_restart_cmd() in order to continue processing this command.
*
* Called only if the cmd is queued using scst_cmd_init_stage1_done()
* instead of scst_cmd_init_done().
*
* Returns void, the result is expected to be returned using
* scst_restart_cmd().
*
* This command is expected to be NON-BLOCKING.
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* OPTIONAL.
*/
void (*preprocessing_done) (struct scst_cmd *cmd);
/*
* This function informs the driver that a
* received task management function has been completed. This
* function is necessary because low-level protocols have some
* means of informing the initiator about the completion of a
* Task Management function. This function being called will
* signify that a Task Management function is completed as far
* as the mid-level is concerned. Any information that must be
* stored about the command is the responsibility of the low-
* level driver. No return value expected.
* This function is expected to be NON-BLOCKING
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* MUST HAVE if the target supports ABORTs
*/
void (*task_mgmt_fn_done) (struct scst_mgmt_cmd *mgmt_cmd);
/*
* This function should detect the target adapters that
* are present in the system. The function should return a value
@@ -541,95 +681,6 @@ struct scst_tgt_template
*/
int (*release) (struct scst_tgt *tgt);
/*
* This function is equivalent to the SCSI
* queuecommand. The target should transmit the response
* buffer and the status in the scst_cmd struct.
* The expectation is that executing this command is NON-BLOCKING.
*
* After the response is actually transmitted, the target
* should call the scst_tgt_cmd_done() function of the
* mid-level, which will allow it to free up the command.
* Returns one of the SCST_TGT_RES_* constants.
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* MUST HAVE
*/
int (*xmit_response) (struct scst_cmd *cmd);
/*
* This function allows the target driver to handle data buffer
* allocations on its own.
* Shall return 0 in case of success, and != 0 (preferably -ENOMEM)
* in case of error. In case of returning successfully,
* scst_cmd->data_buf_alloced will be set by SCST.
*
* If allocation in atomic context - cf. scst_cmd_atomic() - is not
* desired or fails and consequently != 0 is returned, this function
* will be re-called in thread context.
*
* Please note that the driver will have to handle all relevant details
* such as scatterlist setup, highmem, freeing the allocated memory, ...
* itself.
*
* OPTIONAL.
*/
int (*alloc_data_buf) (struct scst_cmd *cmd);
/*
* This function informs the driver that data
* buffer corresponding to the said command have now been
* allocated and it is OK to receive data for this command.
* This function is necessary because a SCSI target does not
* have any control over the commands it receives. Most lower
* level protocols have a corresponding function which informs
* the initiator that buffers have been allocated e.g., XFER_
* RDY in Fibre Channel. After the data is actually received
* the low-level driver needs to call scst_rx_data() in order to
* continue processing this command.
* Returns one of the SCST_TGT_RES_* constants.
* This command is expected to be NON-BLOCKING.
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* MUST HAVE.
*/
int (*rdy_to_xfer) (struct scst_cmd *cmd);
/*
* Called to notify the driver that the command is about to be freed.
* Necessary, because for aborted commands xmit_response() could not
* be called. Could be called on IRQ context.
*
* MUST HAVE
*/
void (*on_free_cmd) (struct scst_cmd *cmd);
/*
* This function informs the driver that a
* received task management function has been completed. This
* function is necessary because low-level protocols have some
* means of informing the initiator about the completion of a
* Task Management function. This function being called will
* signify that a Task Management function is completed as far
* as the mid-level is concerned. Any information that must be
* stored about the command is the responsibility of the low-
* level driver. No return value expected.
* This function is expected to be NON-BLOCKING
*
* Pay attention to "atomic" attribute of the cmd, which can be get
* by scst_cmd_atomic(): it is true if the function called in the
* atomic (non-sleeping) context.
*
* MUST HAVE if the target supports ABORTs
*/
void (*task_mgmt_fn_done) (struct scst_mgmt_cmd *mgmt_cmd);
/*
* This function is used for Asynchronous Event Notification.
* It is the responsibility of the driver to notify any/all
@@ -652,6 +703,12 @@ struct scst_tgt_template
int (*proc_info) (char *buffer, char **start, off_t offset,
int length, int *eof, struct scst_tgt *tgt, int inout);
/*
* Name of the template. Must be unique to identify
* the template. MUST HAVE
*/
const char name[50];
/* Private, must be inited to 0 by memset() */
/* List of targets per template, protected by scst_mutex */
@@ -767,9 +824,9 @@ struct scst_dev_type
/*
* Called to execute a task management command.
* Returns:
* - SCST_DEV_TM_COMPLETED_SUCCESS - the command is done with success,
* - SCST_MGMT_STATUS_SUCCESS - the command is done with success,
no further actions required
* - SCST_DEV_TM_COMPLETED_FAILED - the command is failed,
* - The SCST_MGMT_STATUS_* error code if the command is failed and
no further actions required
* - SCST_DEV_TM_NOT_COMPLETED - regular standard actions for the command
* should be done
@@ -1020,6 +1077,21 @@ struct scst_cmd
*/
unsigned int may_need_dma_sync:1;
/*
* Set if scst_cmd_init_stage1_done() called and the target
* want that preprocessing_done() will be called
*/
unsigned int preprocessing_only:1;
/*
* Set if scst_cmd_init_stage1_done() called and the target want
* that the SN for the cmd isn't assigned until scst_restart_cmd()
*/
unsigned int no_sn:1;
/* Set if the cmd's must not use sgv cache for data buffer */
unsigned int no_sgv:1;
/**************************************************************/
unsigned long cmd_flags; /* cmd's async flags */
@@ -1100,6 +1172,9 @@ struct scst_cmd
uint16_t host_status; /* set by low-level driver to indicate status */
uint16_t driver_status; /* set by mid-level */
/* Used for storage of target driver private stuff */
void *tgt_priv;
/*
* Used to restore the SG vector if it was modified by
* scst_set_resp_data_len()
@@ -1114,18 +1189,15 @@ struct scst_cmd
/* List entry for dev's blocked_cmd_list */
struct list_head blocked_cmd_list_entry;
/* Used for storage of dev handler private stuff */
void *dh_priv;
/*
* Fileio private fields
*/
struct list_head fileio_cmd_list_entry;
int fileio_in_list;
/* Used for storage of target driver private stuff */
void *tgt_priv;
/* Used for storage of dev handler private stuff */
void *dh_priv;
/*
* Used to store previous tgt_dev if dev handler returns
* SCST_CMD_STATE_REINIT state
@@ -1536,6 +1608,31 @@ struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
*/
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context);
/*
* Notifies SCST that the driver finished the first stage of the command
* initialization, and the command is ready for execution, but after
* SCST has done the command's preprocessing, the preprocessing_done()
* function should be called. The second argument sets the preferred command execution
* context. See SCST_CONTEXT_* constants for details.
*/
/*
 * Completes the first stage of command initialization: marks the cmd so
 * that SCST will invoke the target's preprocessing_done() callback after
 * its own preprocessing, optionally deferring SN assignment until
 * scst_restart_cmd() (set_sn == 0), then queues the cmd via
 * scst_cmd_init_done() in the preferred execution context.
 */
static inline void scst_cmd_init_stage1_done(struct scst_cmd *cmd,
	int pref_context, int set_sn)
{
	/* Defer SN assignment when the caller did not request it now */
	cmd->no_sn = set_sn ? 0 : 1;
	cmd->preprocessing_only = 1;
	scst_cmd_init_done(cmd, pref_context);
}
/*
* Notifies SCST that the driver finished its part of the command's
* preprocessing and it is ready for further processing.
* The second argument sets data receiving completion status
* (see SCST_PREPROCESS_STATUS_* constants for details)
* The third argument sets preferred command execution context
* (see SCST_CONTEXT_* constants for details)
*/
void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context);
/*
* Notifies SCST that the driver received all the necessary data
* and the command is ready for further processing.
@@ -1861,6 +1958,19 @@ static inline void scst_cmd_set_data_buff_alloced(struct scst_cmd *cmd)
cmd->data_buf_alloced = 1;
}
/*
* Get/Set functions for no_sgv flag
*/
static inline int scst_cmd_get_no_sgv(struct scst_cmd *cmd)
{
return cmd->no_sgv;
}
static inline void scst_cmd_set_no_sgv(struct scst_cmd *cmd)
{
cmd->no_sgv = 1;
}
/*
* Returns 1 if the cmd was aborted, so its status is invalid and no
* reply shall be sent to the remote initiator. A target driver should
@@ -2087,4 +2197,16 @@ int scst_check_mem(struct scst_cmd *cmd);
void scst_get(void);
void scst_put(void);
/*
* Allocates and returns pointer to SG vector with data size "size".
* If use_clustering is not 0, segments in the vector will be merged,
* when possible. In *count returned the count of entries in the vector.
* Returns NULL for failure.
*/
struct scatterlist *scst_alloc(int size, unsigned long gfp_mask,
int use_clustering, int *count);
/* Frees SG vector returned by scst_alloc() */
void scst_free(struct scatterlist *sg, int count);
#endif /* __SCST_H */

View File

@@ -20,9 +20,7 @@
#ifndef __SCST_DEBUG_H
#define __SCST_DEBUG_H
#include <linux/config.h> /* for CONFIG_SCSI_TARGET_EXTRACHEKS
CONFIG_SCSI_TARGET_TRACING
CONFIG_SCSI_TARGET_DEBUG */
#include <linux/config.h> /* for CONFIG_* */
#if !defined(EXTRACHECKS) && defined(CONFIG_SCSI_TARGET_EXTRACHECKS)
#define EXTRACHECKS
@@ -36,6 +34,34 @@
#define DEBUG
#endif
/*
 * sBUG()/sBUG_ON(): SCST wrappers around the kernel's BUG()/BUG_ON().
 * In DEBUG builds where the kernel lacks CONFIG_DEBUG_BUGVERBOSE, plain
 * BUG() reports no source location, so sBUG() logs the file and line
 * (and sBUG_ON() additionally the stringified failed condition) at
 * KERN_CRIT before calling BUG(). Non-DEBUG builds fall straight
 * through to the kernel primitives.
 */
#ifdef DEBUG
#ifndef CONFIG_DEBUG_BUGVERBOSE
#define sBUG() do { \
printk(KERN_CRIT "BUG at %s:%d\n", \
__FILE__, __LINE__); \
BUG(); \
} while (0)
#else
/* Kernel already prints file/line from BUG() itself */
#define sBUG() BUG()
#endif
#define sBUG_ON(p) do { \
if (unlikely(p)) { \
printk(KERN_CRIT "BUG at %s:%d (%s)\n", \
__FILE__, __LINE__, #p); \
BUG(); \
} \
} while (0)
#else
#define sBUG() BUG()
#define sBUG_ON(p) BUG_ON(p)
#endif
/*
 * EXTRACHECKS_BUG_ON(): assertion compiled in only when EXTRACHECKS is
 * defined; expands to nothing (argument not evaluated) otherwise.
 */
#ifdef EXTRACHECKS
#define EXTRACHECKS_BUG_ON(a) sBUG_ON(a)
#else
#define EXTRACHECKS_BUG_ON(a)
#endif
#ifdef DEBUG
//# define LOG_FLAG KERN_DEBUG
# define LOG_FLAG KERN_INFO

View File

@@ -45,7 +45,7 @@
/* 8 byte ASCII Vendor of the FILE IO target */
#define SCST_FIO_VENDOR "SCST_FIO"
/* 4 byte ASCII Product Revision Level of the FILE IO target - left aligned */
#define SCST_FIO_REV " 095"
#define SCST_FIO_REV " 096"
#define READ_CAP_LEN 8
#define READ_CAP16_LEN 32
@@ -2217,7 +2217,7 @@ out:
static int fileio_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
struct scst_tgt_dev *tgt_dev)
{
int res = SCST_DEV_TM_COMPLETED_SUCCESS;
int res = SCST_DEV_TM_NOT_COMPLETED;
TRACE_ENTRY();

View File

@@ -147,20 +147,6 @@ int scst_register_target_template(struct scst_tgt_template *vtt)
goto out;
}
if (!vtt->rdy_to_xfer) {
PRINT_ERROR_PR("Target driver %s doesn't have a "
"rdy_to_xfer() method.", vtt->name);
res = -EINVAL;
goto out;
}
if (!vtt->on_free_cmd) {
PRINT_ERROR_PR("Target driver %s doesn't have a "
"on_free_cmd() method.", vtt->name);
res = -EINVAL;
goto out;
}
if (!vtt->no_proc_entry) {
res = scst_build_proc_target_dir_entries(vtt);
if (res < 0) {
@@ -295,7 +281,7 @@ void scst_unregister(struct scst_tgt *tgt)
down(&scst_mutex);
list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
BUG_ON(!sess->shutting_down);
sBUG_ON(!sess->shutting_down);
}
up(&scst_mutex);
@@ -1245,6 +1231,7 @@ EXPORT_SYMBOL(scst_unregister_target_template);
EXPORT_SYMBOL(scst_cmd_init_done);
EXPORT_SYMBOL(scst_tgt_cmd_done);
EXPORT_SYMBOL(scst_restart_cmd);
EXPORT_SYMBOL(scst_rx_cmd);
EXPORT_SYMBOL(scst_rx_data);
EXPORT_SYMBOL(scst_rx_mgmt_fn_tag);
@@ -1272,6 +1259,9 @@ EXPORT_SYMBOL(scst_check_mem);
EXPORT_SYMBOL(scst_get);
EXPORT_SYMBOL(scst_put);
EXPORT_SYMBOL(scst_alloc);
EXPORT_SYMBOL(scst_free);
/*
* Other Commands
*/

View File

@@ -180,7 +180,7 @@ void scst_free_device(struct scst_device *dev)
{
PRINT_ERROR_PR("%s: dev_tgt_dev_list or dev_acg_dev_list "
"is not empty!", __FUNCTION__);
BUG();
sBUG();
}
#endif
@@ -732,7 +732,7 @@ struct scst_cmd *scst_create_prepare_internal_cmd(
res->bufflen = bufsize;
if (bufsize > 0) {
if (scst_alloc_space(res) != 0)
PRINT_ERROR("Unable to create buffer (size %d) for "
PRINT_ERROR_PR("Unable to create buffer (size %d) for "
"internal cmd", bufsize);
goto out_free_res;
}
@@ -799,7 +799,7 @@ struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
TRACE_ENTRY();
BUG_ON(orig_cmd);
sBUG_ON(orig_cmd);
len = scst_get_buf_first(cmd, &buf);
@@ -1091,7 +1091,7 @@ void scst_free_cmd(struct scst_cmd *cmd)
TRACE_ENTRY();
BUG_ON(cmd->blocking);
sBUG_ON(cmd->blocking);
#if defined(EXTRACHECKS) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
if (cmd->scsi_req) {
@@ -1101,9 +1101,11 @@ void scst_free_cmd(struct scst_cmd *cmd)
}
#endif
TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
cmd->tgtt->on_free_cmd(cmd);
TRACE_DBG("%s", "Target's on_free_cmd() returned");
if (cmd->tgtt->on_free_cmd != NULL) {
TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
cmd->tgtt->on_free_cmd(cmd);
TRACE_DBG("%s", "Target's on_free_cmd() returned");
}
if (likely(cmd->dev != NULL)) {
struct scst_dev_type *handler = cmd->dev->handler;
@@ -1226,7 +1228,7 @@ struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
if (mcmd == NULL) {
PRINT_ERROR("%s", "Allocation of management command "
PRINT_ERROR_PR("%s", "Allocation of management command "
"failed, some commands and their data could leak");
goto out;
}
@@ -1310,14 +1312,14 @@ int scst_alloc_space(struct scst_cmd *cmd)
int gfp_mask;
int res = -ENOMEM;
int ini_unchecked_isa_dma, ini_use_clustering;
int use_clustering = 0;
struct sgv_pool *pool;
struct sgv_pool_obj *sgv;
TRACE_ENTRY();
if (cmd->data_buf_alloced) {
TRACE_MEM("%s", "data_buf_alloced set, returning");
BUG_ON(cmd->sg == NULL);
sBUG_ON(cmd->sg == NULL);
res = 0;
goto out;
}
@@ -1340,10 +1342,12 @@ int scst_alloc_space(struct scst_cmd *cmd)
ini_use_clustering = 0;
}
if (cmd->tgtt->use_clustering || ini_use_clustering)
if ((cmd->tgtt->use_clustering || ini_use_clustering) &&
!cmd->tgtt->no_clustering)
{
TRACE_MEM("%s", "Use clustering");
pool = &scst_sgv.norm_clust;
use_clustering = 1;
}
if (cmd->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
@@ -1357,9 +1361,17 @@ int scst_alloc_space(struct scst_cmd *cmd)
#endif
}
sgv = sgv_pool_alloc(pool, cmd->bufflen, gfp_mask, &cmd->sg_cnt);
if (sgv == NULL)
goto out;
if (cmd->no_sgv) {
cmd->sg = scst_alloc(cmd->bufflen, gfp_mask, use_clustering,
&cmd->sg_cnt);
if (cmd->sg == NULL)
goto out;
} else {
cmd->sg = sgv_pool_alloc(pool, cmd->bufflen, gfp_mask,
&cmd->sg_cnt, &cmd->sgv);
if (cmd->sg == NULL)
goto out;
}
if (unlikely(cmd->sg_cnt > ini_sg)) {
static int ll;
@@ -1370,7 +1382,7 @@ int scst_alloc_space(struct scst_cmd *cmd)
ini_sg);
ll++;
}
goto out_sgv_free;
goto out_sg_free;
}
if (unlikely(cmd->sg_cnt > tgt_sg)) {
@@ -1382,11 +1394,8 @@ int scst_alloc_space(struct scst_cmd *cmd)
cmd->sg_cnt, tgt_sg);
ll++;
}
goto out_sgv_free;
goto out_sg_free;
}
cmd->sgv = sgv;
cmd->sg = sgv_pool_sg(sgv);
res = 0;
@@ -1394,8 +1403,13 @@ out:
TRACE_EXIT();
return res;
out_sgv_free:
sgv_pool_free(sgv);
out_sg_free:
if (cmd->no_sgv)
scst_free(cmd->sg, cmd->sg_cnt);
else
sgv_pool_free(cmd->sgv);
cmd->sgv = NULL;
cmd->sg = NULL;
cmd->sg_cnt = 0;
goto out;
}
@@ -1412,7 +1426,8 @@ void scst_release_space(struct scst_cmd *cmd)
if (cmd->sgv) {
scst_check_restore_sg_buff(cmd);
sgv_pool_free(cmd->sgv);
}
} else if (cmd->sg)
scst_free(cmd->sg, cmd->sg_cnt);
cmd->sgv = NULL;
cmd->sg_cnt = 0;
@@ -1454,6 +1469,7 @@ int __scst_get_buf(struct scst_cmd *cmd, uint8_t **buf)
#else
*buf = page_address(sg[i].page);
#endif
*buf += sg[i].offset;
res = sg[i].length;
cmd->get_sg_buf_entry_num++;
@@ -1982,7 +1998,7 @@ int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
int res = 0;
struct scst_device *dev = cmd->dev;
BUG_ON(cmd->blocking);
sBUG_ON(cmd->blocking);
atomic_inc(&dev->on_dev_count);
@@ -2073,7 +2089,7 @@ void scst_unblock_cmds(struct scst_device *dev)
if (likely(!cmd->internal) && likely(!cmd->retry)) {
int expected_sn;
if (cmd->tgt_dev == NULL)
BUG();
sBUG();
expected_sn = cmd->tgt_dev->expected_sn;
if (cmd->sn == expected_sn)
brk = 1;
@@ -2137,13 +2153,18 @@ static struct scst_cmd *scst_inc_expected_sn(
}
void scst_inc_expected_sn_unblock(struct scst_tgt_dev *tgt_dev,
struct scst_cmd *cmd_sn, int locked)
struct scst_cmd *out_of_sn_cmd, int locked)
{
struct scst_cmd *cmd;
TRACE_ENTRY();
cmd = scst_inc_expected_sn(tgt_dev, cmd_sn);
if (out_of_sn_cmd->no_sn) {
TRACE(TRACE_SCSI_SERIALIZING, "cmd %p with no_sn", out_of_sn_cmd);
goto out;
}
cmd = scst_inc_expected_sn(tgt_dev, out_of_sn_cmd);
if (cmd != NULL) {
unsigned long flags = 0;
if (!locked)
@@ -2153,10 +2174,11 @@ void scst_inc_expected_sn_unblock(struct scst_tgt_dev *tgt_dev,
list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
if (!locked)
spin_unlock_irqrestore(&scst_list_lock, flags);
if (!cmd_sn->processible_env)
if (!out_of_sn_cmd->processible_env)
wake_up(&scst_list_waitQ);
}
out:
TRACE_EXIT();
return;
}
@@ -2294,7 +2316,7 @@ static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
break;
default:
BUG();
sBUG();
}
list_move_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
cmd->tm_dbg_delayed = 1;
@@ -2345,7 +2367,7 @@ static void tm_dbg_change_state(void)
}
break;
default:
BUG();
sBUG();
}
tm_dbg_on_state_passes =
tm_dbg_on_state_num_passes[tm_dbg_state];

View File

@@ -43,6 +43,7 @@
*/
atomic_t sgv_big_total_alloc;
atomic_t sgv_other_total_alloc;
static int scst_check_clustering(struct scatterlist *sg, int cur, int hint)
{
@@ -111,15 +112,15 @@ out_head:
goto out;
}
static void sgv_free_sg(struct sgv_pool_obj *obj)
static void scst_free_sg_entries(struct scatterlist *sg, int sg_count)
{
int i;
TRACE_MEM("obj=%p, sg_count=%d", obj, obj->sg_count);
TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);
for (i = 0; i < obj->sg_count; i++) {
struct page *p = obj->entries[i].page;
int len = obj->entries[i].length;
for (i = 0; i < sg_count; i++) {
struct page *p = sg[i].page;
int len = sg[i].length;
int pages =
(len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
@@ -149,13 +150,12 @@ static void sgv_free_sg(struct sgv_pool_obj *obj)
p += 1 << order;
}
}
obj->sg_count = 0;
}
static int sgv_alloc_sg(struct sgv_pool_obj *obj, int pages,
unsigned long mask, int clustered)
static int scst_alloc_sg_entries(struct scatterlist *sg, int pages,
unsigned long gfp_mask, int clustered, struct trans_tbl_ent *trans_tbl)
{
int res = 0;
int sg_count = 0;
int pg, i, j;
int merged = -1;
@@ -168,100 +168,55 @@ static int sgv_alloc_sg(struct sgv_pool_obj *obj, int pages,
mask |= __GFP_ZERO;
#endif
obj->sg_count = 0;
for (pg = 0; pg < pages; pg++) {
#ifdef DEBUG_OOM
if ((scst_random() % 10000) == 55)
obj->entries[obj->sg_count].page = NULL;
sg[sg_count].page = NULL;
else
#endif
obj->entries[obj->sg_count].page = alloc_pages(mask, 0);
if (obj->entries[obj->sg_count].page == NULL) {
sg[sg_count].page = alloc_pages(gfp_mask, 0);
if (sg[sg_count].page == NULL) {
TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
"sgv_pool_obj page failed");
res = -ENOMEM;
goto out_free;
"sg page failed");
goto out_no_mem;
}
obj->entries[obj->sg_count].length = PAGE_SIZE;
sg[sg_count].length = PAGE_SIZE;
if (clustered) {
merged = scst_check_clustering(obj->entries,
obj->sg_count, merged);
merged = scst_check_clustering(sg, sg_count, merged);
if (merged == -1)
obj->sg_count++;
sg_count++;
} else
obj->sg_count++;
sg_count++;
TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
obj->sg_count);
sg_count);
}
if (clustered) {
if (clustered && trans_tbl) {
pg = 0;
for (i = 0; i < pages; i++) {
int n = obj->entries[i].length >> PAGE_SHIFT;
obj->trans_tbl[i].pg_count = pg;
int n = sg[i].length >> PAGE_SHIFT;
trans_tbl[i].pg_count = pg;
for (j = 0; j < n; j++)
obj->trans_tbl[pg++].sg_num = i+1;
trans_tbl[pg++].sg_num = i+1;
}
}
out:
TRACE_MEM("res=%d, sg_count=%d", res, obj->sg_count);
return res;
TRACE_MEM("sg_count=%d", sg_count);
return sg_count;
out_free:
sgv_free_sg(obj);
out_no_mem:
scst_free_sg_entries(sg, sg_count);
sg_count = 0;
goto out;
}
struct sgv_pool_obj *sgv_pool_alloc_big(int size, int pages,
unsigned long mask, int *count, int clustered)
{
struct sgv_pool_obj *obj;
int elen, cnt = 0;
elen = sizeof(*obj) + pages * (sizeof(obj->entries[0]) +
clustered ? sizeof(obj->trans_tbl[0]) : 0);
obj = kzalloc(elen, mask & ~(__GFP_HIGHMEM|GFP_DMA));
if (obj == NULL) {
TRACE(TRACE_OUT_OF_MEM, "Allocation big of sgv_pool_obj "
"failed (elen=%d, size=%d)", elen, size);
goto out;
}
obj->entries = (struct scatterlist*)&obj->trans_tbl[pages];
atomic_inc(&sgv_big_total_alloc);
if (sgv_alloc_sg(obj, pages, mask, clustered) != 0)
goto out_free;
cnt = obj->sg_count;
if (size & ~PAGE_MASK) {
obj->entries[cnt-1].length -=
PAGE_SIZE - (size & ~PAGE_MASK);
}
*count = cnt;
out:
TRACE_MEM("obj=%p (count=%d)", obj, cnt);
return obj;
out_free:
kfree(obj);
obj = NULL;
goto out;
}
void __sgv_pool_free_big(struct sgv_pool_obj *obj)
{
TRACE_MEM("obj=%p", obj);
sgv_free_sg(obj);
kfree(obj);
}
struct sgv_pool_obj *sgv_pool_alloc(struct sgv_pool *pool, int size,
unsigned long mask, int *count)
struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, int size,
unsigned long gfp_mask, int *count, struct sgv_pool_obj **sgv)
{
struct sgv_pool_obj *obj;
int order, pages, cnt, sg;
struct scatterlist *res = NULL;
if (unlikely(size == 0))
return NULL;
@@ -273,17 +228,18 @@ struct sgv_pool_obj *sgv_pool_alloc(struct sgv_pool *pool, int size,
if (order >= SGV_POOL_ELEMENTS) {
obj = NULL;
if (mask & GFP_ATOMIC)
if (gfp_mask & GFP_ATOMIC)
goto out;
obj = sgv_pool_alloc_big(size, pages, mask, count,
pool->clustered);
atomic_inc(&sgv_big_total_alloc);
atomic_dec(&sgv_other_total_alloc);
res = scst_alloc(size, gfp_mask, pool->clustered, count);
goto out;
}
obj = kmem_cache_alloc(pool->caches[order],
mask & ~(__GFP_HIGHMEM|GFP_DMA));
gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
if (obj == NULL) {
if (!(mask & GFP_ATOMIC)) {
if (!(gfp_mask & GFP_ATOMIC)) {
TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
"failed (size %d)", size);
}
@@ -293,7 +249,7 @@ struct sgv_pool_obj *sgv_pool_alloc(struct sgv_pool *pool, int size,
if (obj->owner_cache != pool->caches[order]) {
int esz, epg, eorder;
if (mask & GFP_ATOMIC)
if (gfp_mask & GFP_ATOMIC)
goto out_free;
esz = (1 << order) * sizeof(obj->entries[0]);
@@ -304,15 +260,16 @@ struct sgv_pool_obj *sgv_pool_alloc(struct sgv_pool *pool, int size,
obj->eorder = eorder;
obj->entries = (struct scatterlist*)__get_free_pages(
mask|__GFP_ZERO, eorder);
gfp_mask|__GFP_ZERO, eorder);
if (obj->entries == NULL) {
TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
"SG vector order %d failed", eorder);
goto out_free;
}
if (sgv_alloc_sg(obj, (1 << order), mask,
pool->clustered) != 0)
obj->sg_count = scst_alloc_sg_entries(obj->entries, (1 << order),
gfp_mask, pool->clustered, obj->trans_tbl);
if (obj->sg_count <= 0)
goto out_free_entries;
obj->owner_cache = pool->caches[order];
@@ -343,8 +300,11 @@ struct sgv_pool_obj *sgv_pool_alloc(struct sgv_pool *pool, int size,
"sg_count=%d, count=%d, last_len=%d)", obj, size, pages,
obj->sg_count, *count, obj->entries[obj->orig_sg].length);
res = obj->entries;
*sgv = obj;
out:
return obj;
return res;
out_free_entries:
free_pages((unsigned long)obj->entries, obj->eorder);
@@ -373,7 +333,7 @@ static void __sgv_dtor(void *data, int pages)
struct sgv_pool_obj *obj = data;
TRACE_MEM("Destructor for sgv_obj %p", obj);
if (obj->entries) {
sgv_free_sg(obj);
scst_free_sg_entries(obj->entries, obj->sg_count);
free_pages((unsigned long)obj->entries, obj->eorder);
}
}
@@ -401,6 +361,44 @@ dtor_t cache_dtors[SGV_POOL_ELEMENTS] =
SGV_DTOR_NAME(4), SGV_DTOR_NAME(5), SGV_DTOR_NAME(6), SGV_DTOR_NAME(7),
SGV_DTOR_NAME(8), SGV_DTOR_NAME(9), SGV_DTOR_NAME(10) };
struct scatterlist *scst_alloc(int size, unsigned long gfp_mask,
int use_clustering, int *count)
{
struct scatterlist *res;
int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
TRACE_ENTRY();
atomic_inc(&sgv_other_total_alloc);
res = kzalloc(pages*sizeof(*res), gfp_mask);
if (res == NULL)
goto out;
*count = scst_alloc_sg_entries(res, pages, gfp_mask, use_clustering,
NULL);
if (*count <= 0)
goto out_free;
out:
TRACE_MEM("Alloced sg %p (count %d)", res, *count);
TRACE_EXIT_HRES((int)res);
return res;
out_free:
kfree(res);
res = NULL;
goto out;
}
void scst_free(struct scatterlist *sg, int count)
{
TRACE_MEM("Freeing sg=%p", sg);
scst_free_sg_entries(sg, count);
kfree(sg);
}
int sgv_pool_init(struct sgv_pool *pool, const char *name, int clustered)
{
int res = -ENOMEM;
@@ -476,7 +474,7 @@ struct sgv_pool *sgv_pool_create(const char *name, int clustered)
TRACE_ENTRY();
pool = kmalloc(sizeof(*pool), GFP_KERNEL);
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (pool == NULL) {
TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
goto out;
@@ -513,6 +511,7 @@ int scst_sgv_pools_init(struct scst_sgv_pools *pools)
TRACE_ENTRY();
atomic_set(&sgv_big_total_alloc, 0);
atomic_set(&sgv_other_total_alloc, 0);
res = sgv_pool_init(&pools->norm, "sgv", 0);
if (res != 0)

View File

@@ -70,6 +70,7 @@ struct scst_sgv_pools
};
extern atomic_t sgv_big_total_alloc;
extern atomic_t sgv_other_total_alloc;
extern struct sgv_pool *sgv_pool_create(const char *name, int clustered);
extern void sgv_pool_destroy(struct sgv_pool *pool);
@@ -78,19 +79,13 @@ extern int sgv_pool_init(struct sgv_pool *pool, const char *name,
int clustered);
extern void sgv_pool_deinit(struct sgv_pool *pool);
extern struct sgv_pool_obj *sgv_pool_alloc(struct sgv_pool *pool, int size,
unsigned long flags, int *count);
extern void __sgv_pool_free_big(struct sgv_pool_obj *obj);
static inline void sgv_pool_free(struct sgv_pool_obj *obj)
extern struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, int size,
unsigned long gfp_mask, int *count, struct sgv_pool_obj **sgv);
static inline void sgv_pool_free(struct sgv_pool_obj *sgv)
{
TRACE_MEM("Freeing sgv_obj %p", obj);
if (obj->owner_cache != NULL) {
obj->entries[obj->orig_sg].length = obj->orig_length;
kmem_cache_free(obj->owner_cache, obj);
}
else
__sgv_pool_free_big(obj);
TRACE_MEM("Freeing sgv_obj %p", sgv);
sgv->entries[sgv->orig_sg].length = sgv->orig_length;
kmem_cache_free(sgv->owner_cache, sgv);
}
static inline struct scatterlist *sgv_pool_sg(struct sgv_pool_obj *obj)

View File

@@ -275,7 +275,7 @@ static inline void scst_do_req(struct scsi_request *sreq,
#ifdef STRICT_SERIALIZING
scsi_do_req(sreq, cmnd, buffer, bufflen, done, timeout, retries);
#elif defined(FILEIO_ONLY)
BUG();
sBUG();
#else
scsi_do_req_fifo(sreq, cmnd, buffer, bufflen, done, timeout, retries);
#endif
@@ -290,7 +290,7 @@ static inline int scst_exec_req(struct scsi_device *sdev,
return scsi_execute_async(sdev, cmd, cmd_len, data_direction, buffer,
bufflen, use_sg, timeout, retries, privdata, done, gfp);
#elif defined(FILEIO_ONLY)
BUG();
sBUG();
return -1;
#else
return scsi_execute_async_fifo(sdev, cmd, cmd_len, data_direction,

View File

@@ -414,7 +414,7 @@ int scst_proc_log_entry_write(struct file *file, const char *buf,
}
}
if (level == 0) {
PRINT_ERROR("Unknown token \"%s\"", p);
PRINT_ERROR_PR("Unknown token \"%s\"", p);
res = -EINVAL;
goto out_free;
}
@@ -828,6 +828,11 @@ static int scst_proc_sgv_read(char *buffer, char **start,
if (scst_proc_update_size(size, offset, length, &st))
goto stop_output;
size = scnprintf(buffer + st.len, length - st.len, "\n%-32s %-11d\n",
"other", atomic_read(&sgv_other_total_alloc));
if (scst_proc_update_size(size, offset, length, &st))
goto stop_output;
*eof = 1;
stop_output:

View File

@@ -30,7 +30,8 @@
#include "scsi_tgt.h"
#include "scst_priv.h"
static int scst_do_job_init(struct list_head *init_cmd_list);
static int scst_do_job_init(void);
static int scst_process_init_cmd(struct scst_cmd *cmd);
static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
int left_locked);
@@ -61,6 +62,20 @@ static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
return res;
}
/* Called under scst_list_lock and IRQs disabled */
static inline void scst_cmd_set_sn(struct scst_cmd *cmd)
{
/* ToDo: cmd->queue_type */
/* scst_list_lock is enough to protect that */
cmd->sn = cmd->tgt_dev->next_sn;
cmd->tgt_dev->next_sn++;
cmd->no_sn = 0;
TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/, "cmd(%p)->sn: %d",
cmd, cmd->sn);
}
static inline void scst_schedule_tasklet(void)
{
struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
@@ -97,7 +112,7 @@ struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
#ifdef EXTRACHECKS
if (unlikely(sess->shutting_down)) {
PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
BUG();
sBUG();
}
#endif
@@ -130,6 +145,13 @@ out:
return cmd;
}
static void scst_setup_to_active(struct scst_cmd *cmd)
{
cmd->state = SCST_CMD_STATE_XMIT_RESP;
TRACE_DBG("Adding cmd %p to active cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
}
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
{
int res = 0;
@@ -144,6 +166,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Recieving CDB",
cmd->cdb, cmd->cdb_len);
#ifdef EXTRACHECKS
if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
{
@@ -152,6 +175,7 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
cmd->tgtt->name);
pref_context = SCST_CONTEXT_TASKLET;
}
#endif
spin_lock_irqsave(&scst_list_lock, flags);
@@ -171,79 +195,79 @@ void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
goto out_unlock_flags;
case SCST_SESS_IPH_FAILED:
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_XMIT_RESP;
TRACE_DBG("Adding cmd %p to active cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry,
&scst_active_cmd_list);
scst_setup_to_active(cmd);
goto active;
default:
BUG();
sBUG();
}
}
#ifdef EXTRACHECKS
if (unlikely(cmd->lun == (lun_t)-1)) {
PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
PRINT_ERROR_PR("Wrong LUN %d, finishing cmd", -1);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_lun_not_supported));
cmd->state = SCST_CMD_STATE_XMIT_RESP;
TRACE_DBG("Moving cmd %p to active cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
scst_setup_to_active(cmd);
goto active;
}
if (unlikely(cmd->cdb_len == 0)) {
PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
PRINT_ERROR_PR("Wrong CDB len %d, finishing cmd", 0);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_invalid_opcode));
cmd->state = SCST_CMD_STATE_XMIT_RESP;
TRACE_DBG("Adding cmd %p to active cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
scst_setup_to_active(cmd);
goto active;
}
#endif
TRACE_DBG("Adding cmd %p to init cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
cmd->state = SCST_CMD_STATE_INIT;
TRACE_DBG("Moving cmd %p to init cmd list", cmd);
list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
switch (pref_context) {
case SCST_CONTEXT_TASKLET:
scst_schedule_tasklet();
goto out_unlock_flags;
case SCST_CONTEXT_DIRECT:
case SCST_CONTEXT_DIRECT_ATOMIC:
res = scst_do_job_init(&scst_init_cmd_list);
if (res > 0)
if (cmd->no_sn)
res = scst_process_init_cmd(cmd);
else
res = scst_do_job_init();
if (unlikely(res > 0))
goto out_unlock_flags;
break;
case SCST_CONTEXT_THREAD:
goto out_thread_unlock_flags;
case SCST_CONTEXT_TASKLET:
scst_schedule_tasklet();
goto out_unlock_flags;
default:
PRINT_ERROR_PR("Context %x is undefined, using thread one",
pref_context);
PRINT_ERROR_PR("Context %x is undefined, using the thread one",
pref_context);
goto out_thread_unlock_flags;
}
active:
/* Here cmd must be in active cmd list */
switch (pref_context) {
case SCST_CONTEXT_TASKLET:
scst_schedule_tasklet();
goto out_unlock_flags;
case SCST_CONTEXT_DIRECT:
case SCST_CONTEXT_DIRECT_ATOMIC:
scst_process_active_cmd(cmd, pref_context, &flags, 0);
/* For *NEED_THREAD wake_up() is already done */
break;
case SCST_CONTEXT_THREAD:
goto out_thread_unlock_flags;
case SCST_CONTEXT_TASKLET:
scst_schedule_tasklet();
goto out_unlock_flags;
default:
PRINT_ERROR_PR("Context %x is undefined, using thread one",
pref_context);
PRINT_ERROR_PR("Context %x is undefined, using the thread one",
pref_context);
goto out_thread_unlock_flags;
}
@@ -270,6 +294,7 @@ static int scst_parse_cmd(struct scst_cmd *cmd)
struct scst_device *dev = cmd->dev;
struct scst_info_cdb cdb_info;
int atomic = scst_cmd_atomic(cmd);
int orig_bufflen = cmd->bufflen;
int set_dir = 1;
TRACE_ENTRY();
@@ -393,6 +418,13 @@ static int scst_parse_cmd(struct scst_cmd *cmd)
if (cmd->data_len == -1)
cmd->data_len = cmd->bufflen;
if (cmd->data_buf_alloced && (orig_bufflen < cmd->bufflen)) {
PRINT_ERROR_PR("Target driver supplied data buffer (size %d), "
"is less, than required (size %d)", orig_bufflen,
cmd->bufflen);
goto out_error;
}
#ifdef EXTRACHECKS
if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
@@ -427,12 +459,6 @@ static int scst_parse_cmd(struct scst_cmd *cmd)
res = SCST_CMD_STATE_RES_CONT_SAME;
break;
case SCST_CMD_STATE_REINIT:
cmd->tgt_dev_saved = tgt_dev_saved;
cmd->state = state;
res = SCST_CMD_STATE_RES_RESTART;
set_dir = 0;
break;
case SCST_CMD_STATE_NEED_THREAD_CTX:
TRACE_DBG("Dev handler %s parse() requested thread "
@@ -441,6 +467,13 @@ static int scst_parse_cmd(struct scst_cmd *cmd)
set_dir = 0;
break;
case SCST_CMD_STATE_REINIT:
cmd->tgt_dev_saved = tgt_dev_saved;
cmd->state = state;
res = SCST_CMD_STATE_RES_RESTART;
set_dir = 0;
break;
default:
if (state >= 0) {
PRINT_ERROR_PR("Dev handler %s parse() returned "
@@ -574,19 +607,34 @@ static int scst_prepare_space(struct scst_cmd *cmd)
TRACE_ENTRY();
if (cmd->data_direction == SCST_DATA_NONE) {
cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
goto out;
}
if (cmd->data_direction == SCST_DATA_NONE)
goto prep_done;
r = scst_check_mem(cmd);
if (unlikely(r != 0))
goto out;
if (cmd->data_buf_tgt_alloc) {
int orig_bufflen = cmd->bufflen;
TRACE_MEM("%s", "Custom tgt data buf allocation requested");
r = cmd->tgtt->alloc_data_buf(cmd);
cmd->data_buf_alloced = (r == 0);
if (r > 0)
r = scst_alloc_space(cmd);
else if (r == 0) {
cmd->data_buf_alloced = 1;
if (cmd->data_buf_alloced &&
unlikely(orig_bufflen < cmd->bufflen)) {
PRINT_ERROR_PR("Target driver allocated data "
"buffer (size %d), is less, than "
"required (size %d)", orig_bufflen,
cmd->bufflen);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_DEV_DONE;
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
}
} else
r = scst_alloc_space(cmd);
@@ -600,6 +648,27 @@ static int scst_prepare_space(struct scst_cmd *cmd)
goto out_no_space;
}
prep_done:
if (cmd->preprocessing_only) {
if (scst_cmd_atomic(cmd) &&
!cmd->tgtt->preprocessing_done_atomic) {
TRACE_DBG("%s", "preprocessing_done() can not be "
"called in atomic context, rescheduling to "
"the thread");
res = SCST_CMD_STATE_RES_NEED_THREAD;
goto out;
}
res = SCST_CMD_STATE_RES_CONT_NEXT;
cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;
TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
cmd->tgtt->preprocessing_done(cmd);
TRACE_DBG("%s", "preprocessing_done() returned");
goto out;
}
switch (cmd->data_direction) {
case SCST_DATA_WRITE:
cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
@@ -624,6 +693,81 @@ out_no_space:
goto out;
}
void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
{
TRACE_ENTRY();
TRACE_DBG("Preferred context: %d", pref_context);
TRACE_DBG("tag=%d, status=%#x", scst_cmd_get_tag(cmd), status);
cmd->non_atomic_only = 0;
#ifdef EXTRACHECKS
if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
{
PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
"SCST_CONTEXT_TASKLET instead\n", pref_context,
cmd->tgtt->name);
pref_context = SCST_CONTEXT_TASKLET;
}
#endif
switch (status) {
case SCST_PREPROCESS_STATUS_SUCCESS:
switch (cmd->data_direction) {
case SCST_DATA_WRITE:
cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
break;
default:
cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
break;
}
if (cmd->no_sn) {
unsigned long flags;
int rc;
spin_lock_irqsave(&scst_list_lock, flags);
/* Necessary to keep the command's order */
rc = scst_do_job_init();
if (unlikely(rc > 0)) {
TRACE_DBG("Adding cmd %p to init cmd list",
cmd);
list_add_tail(&cmd->cmd_list_entry,
&scst_init_cmd_list);
spin_unlock_irqrestore(&scst_list_lock, flags);
goto out;
}
scst_cmd_set_sn(cmd);
spin_unlock_irqrestore(&scst_list_lock, flags);
}
break;
case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
cmd->state = SCST_CMD_STATE_DEV_DONE;
break;
case SCST_PREPROCESS_STATUS_ERROR_FATAL:
set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
/* go through */
case SCST_PREPROCESS_STATUS_ERROR:
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
cmd->state = SCST_CMD_STATE_DEV_DONE;
break;
default:
PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
status);
cmd->state = SCST_CMD_STATE_DEV_DONE;
break;
}
scst_proccess_redirect_cmd(cmd, pref_context, 1);
out:
TRACE_EXIT();
return;
}
/* No locks */
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
@@ -682,6 +826,12 @@ static int scst_rdy_to_xfer(struct scst_cmd *cmd)
goto out_dev_done;
}
if (cmd->tgtt->rdy_to_xfer == NULL) {
cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
res = SCST_CMD_STATE_RES_CONT_SAME;
goto out;
}
if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
TRACE_DBG("%s", "rdy_to_xfer() can not be "
"called in atomic context, rescheduling to the thread");
@@ -817,6 +967,7 @@ void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
cmd->non_atomic_only = 0;
#ifdef EXTRACHECKS
if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
{
@@ -825,6 +976,7 @@ void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
cmd->tgtt->name);
pref_context = SCST_CONTEXT_TASKLET;
}
#endif
switch (status) {
case SCST_RX_STATUS_SUCCESS:
@@ -886,12 +1038,13 @@ static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
if (rq_sense != NULL) {
sense_valid = SCST_SENSE_VALID(rq_sense);
if (sense_valid) {
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
/*
* We checked that rq_sense_len < sizeof(cmd->sense_buffer)
* in init_scst()
*/
memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
memset(&cmd->sense_buffer[rq_sense_len], 0,
sizeof(cmd->sense_buffer) - rq_sense_len);
}
} else
sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
@@ -1164,7 +1317,7 @@ static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
{
TRACE_ENTRY();
BUG_ON(in_irq());
sBUG_ON(in_irq());
scst_dec_on_dev_cmd(cmd);
@@ -1773,11 +1926,13 @@ static int scst_send_to_midlev(struct scst_cmd *cmd)
scst_dec_on_dev_cmd(cmd);
goto out_dec_cmd_count;
} else {
BUG_ON(rc != SCST_EXEC_COMPLETED);
sBUG_ON(rc != SCST_EXEC_COMPLETED);
goto out_unplug;
}
}
EXTRACHECKS_BUG_ON(cmd->no_sn);
expected_sn = tgt_dev->expected_sn;
if (cmd->sn != expected_sn) {
spin_lock_bh(&tgt_dev->sn_lock);
@@ -1815,7 +1970,7 @@ static int scst_send_to_midlev(struct scst_cmd *cmd)
else
goto out_dec_cmd_count;
}
BUG_ON(rc != SCST_EXEC_COMPLETED);
sBUG_ON(rc != SCST_EXEC_COMPLETED);
/* !! At this point cmd can be already freed !! */
count++;
expected_sn = __scst_inc_expected_sn(tgt_dev);
@@ -2189,7 +2344,7 @@ void scst_tgt_cmd_done(struct scst_cmd *cmd)
{
TRACE_ENTRY();
BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
cmd->state = SCST_CMD_STATE_FINISHED;
scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
@@ -2243,7 +2398,7 @@ static int scst_finish_cmd(struct scst_cmd *cmd)
static int scst_translate_lun(struct scst_cmd *cmd)
{
struct scst_tgt_dev *tgt_dev = NULL;
int res = 0;
int res;
TRACE_ENTRY();
@@ -2279,15 +2434,6 @@ static int scst_translate_lun(struct scst_cmd *cmd)
tgt_dev->cmd_count++;
cmd->dev = tgt_dev->acg_dev->dev;
/* ToDo: cmd->queue_type */
/* scst_list_lock is enough to protect that */
cmd->sn = tgt_dev->next_sn;
tgt_dev->next_sn++;
TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
"cmd->sn: %d", cmd->sn);
res = 0;
break;
}
@@ -2320,6 +2466,11 @@ static int scst_process_init_cmd(struct scst_cmd *cmd)
TRACE_ENTRY();
if (unlikely(cmd->tgt_dev)) {
scst_cmd_set_sn(cmd);
goto out_move;
}
res = scst_translate_lun(cmd);
if (likely(res == 0)) {
cmd->state = SCST_CMD_STATE_DEV_PARSE;
@@ -2330,18 +2481,21 @@ static int scst_process_init_cmd(struct scst_cmd *cmd)
"Anonymous" : cmd->sess->initiator_name);
scst_set_busy(cmd);
cmd->state = SCST_CMD_STATE_XMIT_RESP;
}
TRACE_DBG("Moving cmd %p to active cmd list", cmd);
list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
} else if (!cmd->no_sn)
scst_cmd_set_sn(cmd);
} else if (res < 0) {
TRACE_DBG("Finishing cmd %p", cmd);
scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_lun_not_supported));
cmd->state = SCST_CMD_STATE_XMIT_RESP;
TRACE_DBG("Moving cmd %p to active cmd list", cmd);
list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
}
} else
goto out;
out_move:
TRACE_DBG("Moving cmd %p to active cmd list", cmd);
list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
out:
TRACE_EXIT_RES(res);
return res;
}
@@ -2352,22 +2506,24 @@ static int scst_process_init_cmd(struct scst_cmd *cmd)
* have to be serialized, i.e. commands must be executed in order
* of their arrival, and we set this order inside scst_translate_lun().
*/
static int scst_do_job_init(struct list_head *init_cmd_list)
static int scst_do_job_init(void)
{
int res = 1;
int res = 0;
TRACE_ENTRY();
if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
while (!list_empty(init_cmd_list)) {
struct scst_cmd *cmd = list_entry(init_cmd_list->next,
typeof(*cmd),
if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
while (!list_empty(&scst_init_cmd_list)) {
struct scst_cmd *cmd = list_entry(
scst_init_cmd_list.next, typeof(*cmd),
cmd_list_entry);
res = scst_process_init_cmd(cmd);
if (res > 0)
break;
/* For DIRECT context the cmd is always the last */
}
}
} else
res = 1;
TRACE_EXIT_RES(res);
return res;
@@ -2381,9 +2537,7 @@ static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
TRACE_ENTRY();
#ifdef EXTRACHECKS
BUG_ON(in_irq());
#endif
EXTRACHECKS_BUG_ON(in_irq());
cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
SCST_CONTEXT_DIRECT_ATOMIC);
@@ -2421,9 +2575,9 @@ static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
break;
default:
PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
PRINT_ERROR_PR("cmd (%p) in state %d, but shouldn't be",
cmd, cmd->state);
BUG();
sBUG();
res = SCST_CMD_STATE_RES_CONT_NEXT;
break;
}
@@ -2454,7 +2608,7 @@ static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
"useful list (left on scst cmd list)", cmd,
cmd->state);
spin_unlock_irq(&scst_list_lock);
BUG();
sBUG();
spin_lock_irq(&scst_list_lock);
break;
#endif
@@ -2473,16 +2627,16 @@ static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
if (!left_locked)
spin_unlock_irq(&scst_list_lock);
} else
BUG();
sBUG();
} else
BUG();
sBUG();
TRACE_EXIT_RES(res);
return res;
}
/* Called under scst_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *active_cmd_list, int context)
static void scst_do_job_active(int context)
{
int res;
struct scst_cmd *cmd;
@@ -2502,7 +2656,7 @@ static void scst_do_job_active(struct list_head *active_cmd_list, int context)
tm_dbg_check_released_cmds();
restart:
list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
list_for_each_entry(cmd, &scst_active_cmd_list, cmd_list_entry) {
if (atomic && cmd->non_atomic_only) {
TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
continue;
@@ -2517,7 +2671,7 @@ restart:
} else if (res == SCST_CMD_STATE_RES_RESTART) {
break;
} else
BUG();
sBUG();
}
TRACE_EXIT();
@@ -2569,9 +2723,8 @@ int scst_cmd_thread(void *arg)
remove_wait_queue(&scst_list_waitQ, &wait);
}
scst_do_job_init(&scst_init_cmd_list);
scst_do_job_active(&scst_active_cmd_list,
SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
scst_do_job_init();
scst_do_job_active(SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
list_empty(&scst_cmd_list) &&
@@ -2603,9 +2756,8 @@ void scst_cmd_tasklet(long p)
spin_lock_irq(&scst_list_lock);
scst_do_job_init(&scst_init_cmd_list);
scst_do_job_active(&scst_active_cmd_list,
SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
scst_do_job_init();
scst_do_job_active(SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
spin_unlock_irq(&scst_list_lock);
@@ -2705,9 +2857,7 @@ static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
int irq = irqs_disabled();
TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
#ifdef EXTRACHECKS
BUG_ON(in_irq());
#endif
EXTRACHECKS_BUG_ON(in_irq());
if (!irq)
local_bh_disable();
res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd,
@@ -2716,11 +2866,8 @@ static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
local_bh_enable();
TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
tgt_dev->acg_dev->dev->handler->name, res);
if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ?
SCST_MGMT_STATUS_SUCCESS :
SCST_MGMT_STATUS_FAILED;
}
if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
mcmd->status = res;
}
return res;
}
@@ -2757,7 +2904,7 @@ void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
smp_mb__after_set_bit();
if (call_dev_task_mgmt_fn && cmd->tgt_dev)
scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
if (mcmd) {
int defer;
@@ -2790,7 +2937,7 @@ void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
cmd->mgmt_cmnd, mcmd);
}
#endif
BUG_ON(cmd->mgmt_cmnd);
sBUG_ON(cmd->mgmt_cmnd);
mcmd->cmd_wait_count++;
cmd->mgmt_cmnd = mcmd;
}
@@ -2956,7 +3103,7 @@ static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
if (cmd == NULL) {
TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
"tag %d not found", mcmd->tag);
mcmd->status = SCST_MGMT_STATUS_FAILED;
mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
mcmd->state = SCST_MGMT_CMD_STATE_DONE;
} else {
TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
@@ -2974,7 +3121,7 @@ static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
if (rc < 0) {
PRINT_ERROR_PR("Corresponding device for lun %Ld not "
"found", (uint64_t)mcmd->lun);
mcmd->status = SCST_MGMT_STATUS_FAILED;
mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
mcmd->state = SCST_MGMT_CMD_STATE_DONE;
} else if (rc == 0)
mcmd->state = SCST_MGMT_CMD_STATE_READY;
@@ -3020,8 +3167,9 @@ static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
if (rc == SCST_DEV_TM_NOT_COMPLETED)
c = 1;
else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
mcmd->status = SCST_MGMT_STATUS_FAILED;
else if ((rc < 0) &&
(mcmd->status == SCST_MGMT_STATUS_SUCCESS))
mcmd->status = rc;
}
if (cont && !c)
continue;
@@ -3054,7 +3202,8 @@ static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
dev->scsi_dev->host->host_no,
(rc == SUCCESS) ? "SUCCESS" : "FAILED");
if (rc != SUCCESS) {
if ((rc != SUCCESS) &&
(mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
/* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
mcmd->status = SCST_MGMT_STATUS_FAILED;
}
@@ -3101,7 +3250,7 @@ static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
TRACE(TRACE_MGMT, "Resetting host %d bus ",
dev->scsi_dev->host->host_no);
rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
if (rc != SUCCESS)
if ((rc != SUCCESS) && (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
mcmd->status = SCST_MGMT_STATUS_FAILED;
dev->scsi_dev->was_reset = 0;
}
@@ -3146,8 +3295,8 @@ static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
spin_unlock_bh(&dev->dev_lock);
rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
if (rc == SCST_DEV_TM_COMPLETED_FAILED)
mcmd->status = SCST_MGMT_STATUS_FAILED;
if ((rc < 0) && (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
mcmd->status = rc;
__scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
if (nexus_loss)
@@ -3198,8 +3347,9 @@ static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
int rc;
rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
if (rc == SCST_DEV_TM_COMPLETED_FAILED)
mcmd->status = SCST_MGMT_STATUS_FAILED;
if ((rc < 0) &&
(mcmd->status == SCST_MGMT_STATUS_SUCCESS))
mcmd->status = rc;
__scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
if (nexus_loss)
@@ -3257,13 +3407,16 @@ static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
break;
case SCST_CLEAR_ACA:
scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
/* Nothing to do (yet) */
if (scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1) ==
SCST_DEV_TM_NOT_COMPLETED) {
mcmd->status = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
/* Nothing to do (yet) */
}
break;
default:
PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
mcmd->status = SCST_MGMT_STATUS_FAILED;
mcmd->status = SCST_MGMT_STATUS_REJECTED;
break;
}
@@ -3371,7 +3524,7 @@ static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
#ifdef EXTRACHECKS
case SCST_MGMT_CMD_STATE_EXECUTING:
BUG();
sBUG();
#endif
default:
@@ -3513,14 +3666,14 @@ static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
if (unlikely(sess->shutting_down)) {
PRINT_ERROR_PR("%s",
"New mgmt cmd while shutting down the session");
BUG();
sBUG();
}
#endif
if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
switch(sess->init_phase) {
case SCST_SESS_IPH_INITING:
TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
TRACE_DBG("Moving mcmd %p to init deferred mcmd list",
mcmd);
list_add_tail(&mcmd->mgmt_cmd_list_entry,
&sess->init_deferred_mcmd_list);
@@ -3531,7 +3684,7 @@ static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
res = -1;
goto out_unlock;
default:
BUG();
sBUG();
}
}
@@ -3868,13 +4021,13 @@ restart:
if (sess->init_phase == SCST_SESS_IPH_INITING) {
scst_init_session(sess);
} else if (sess->shutting_down) {
BUG_ON(atomic_read(&sess->refcnt) != 0);
sBUG_ON(atomic_read(&sess->refcnt) != 0);
scst_free_session_callback(sess);
} else {
PRINT_ERROR_PR("session %p is in "
"scst_sess_mgmt_list, but in unknown "
"phase %x", sess, sess->init_phase);
BUG();
sBUG();
}
spin_lock_irq(&scst_mgmt_lock);
goto restart;