From 284d07c7186cbc59dd3c273890afecc3d41ecdd2 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 12 Feb 2008 08:40:54 +0000 Subject: [PATCH] Synchronize with Feral CVS repository: - clean up tpublic notes and change tmd_xfr_t to tmd_xact_t - minor cleanups from mendocino git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@255 d57e44dd-8a1f-0410-8b47-8ef2f437770f --- qla_isp/common/isp.c | 4 +- qla_isp/common/isp_tpublic.h | 301 ++++++++++++++++------------------- qla_isp/linux/isp_linux.c | 172 ++++++++++---------- qla_isp/linux/isp_linux.h | 8 +- qla_isp/linux/isp_pci.c | 156 +++++++++--------- qla_isp/linux/isp_scst.c | 86 +++++----- qla_isp/linux/scsi_target.c | 287 +++++++++++++++++++++------------ 7 files changed, 533 insertions(+), 481 deletions(-) diff --git a/qla_isp/common/isp.c b/qla_isp/common/isp.c index 4ebd593cf..f49285a27 100644 --- a/qla_isp/common/isp.c +++ b/qla_isp/common/isp.c @@ -1,4 +1,4 @@ -/* $Id: isp.c,v 1.178 2007/10/11 22:08:38 mjacob Exp $ */ +/* $Id: isp.c,v 1.179 2007/11/14 01:20:38 mjacob Exp $ */ /*- * Copyright (c) 1997-2007 by Matthew Jacob * All rights reserved. @@ -6089,7 +6089,7 @@ isp_fastpost_complete(ispsoftc_t *isp, uint16_t fph) } xs = isp_find_xs(isp, fph); if (xs == NULL) { - isp_prt(isp, ISP_LOGDEBUG1, + isp_prt(isp, ISP_LOGWARN, "Command for fast post handle 0x%x not found", fph); return; } diff --git a/qla_isp/common/isp_tpublic.h b/qla_isp/common/isp_tpublic.h index e682cda18..b29f8785d 100644 --- a/qla_isp/common/isp_tpublic.h +++ b/qla_isp/common/isp_tpublic.h @@ -1,4 +1,4 @@ -/* $Id: isp_tpublic.h,v 1.36 2007/10/31 05:28:18 mjacob Exp $ */ +/* $Id: isp_tpublic.h,v 1.37 2007/11/13 01:25:50 mjacob Exp $ */ /*- * Copyright (c) 1997-2007 by Matthew Jacob * All rights reserved. @@ -31,8 +31,8 @@ * is the GNU Public License: * * This program is free software; you can redistribute it and/or modify - * it under the terms of The Version 2 GNU General Public License as published - * by the Free Software Foundation. 
+ * it under the terms of The Version 2 GNU General Public License as + * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -55,27 +55,39 @@ /* * Host Adapter Public Target Interface Structures && Routines */ +/* + * A note about terminology: + * + * "Inner Layer" means this driver (isp and the isp_tpublic API). + * + * This module includes the both generic and platform specific pieces. + * + * "Outer Layer" means another (external) module. + * + * This is an additional module that actually implements SCSI target command + * decode and is the recipient of incoming commands and the source of the + * disposition for them. + */ #ifndef _ISP_TPUBLIC_H #define _ISP_TPUBLIC_H 1 /* - * Action codes set by the MD target driver for - * the external layer to figure out what to do with. + * Action codes set by the Inner Layer for the outer layer to figure out what to do with. */ typedef enum { QOUT_HBA_REG=0, /* the argument is a pointer to a hba_register_t */ QOUT_ENABLE, /* the argument is a pointer to a enadis_t */ QOUT_DISABLE, /* the argument is a pointer to a enadis_t */ QOUT_TMD_START, /* the argument is a pointer to a tmd_cmd_t */ - QOUT_TMD_DONE, /* the argument is a pointer to a tmd_xfr_t */ + QOUT_TMD_DONE, /* the argument is a pointer to a tmd_xact_t */ QOUT_NOTIFY, /* the argument is a pointer to a tmd_notify_t */ QOUT_HBA_UNREG /* the argument is a pointer to a hba_register_t */ } tact_e; /* - * Action codes set by the external layer for the - * MD driver to figure out what to do with. + * Action codes set by the outer layer for the + * inner layer to figure out what to do with. 
*/ typedef enum { QIN_HBA_REG=99, /* the argument is a pointer to a hba_register_t */ @@ -84,25 +96,27 @@ typedef enum { QIN_GETDLIST, /* the argument is a pointer to a fc_dlist_t */ QIN_ENABLE, /* the argument is a pointer to a enadis_t */ QIN_DISABLE, /* the argument is a pointer to a enadis_t */ - QIN_TMD_CONT, /* the argument is a pointer to a tmd_xfr_t */ + QIN_TMD_CONT, /* the argument is a pointer to a tmd_xact_t */ QIN_TMD_FIN, /* the argument is a pointer to a tmd_cmd_t */ QIN_NOTIFY_ACK, /* the argument is a pointer to a tmd_notify_t */ QIN_HBA_UNREG, /* the argument is a pointer to a hba_register_t */ } qact_e; /* - * This structure is used to register to other software modules the + * This structure is used to register to the outer layer the * binding of an HBA identifier, driver name and instance and the - * lun width capapbilities of this target driver. It's up to each - * platform to figure out how it wants to do this, but a typical - * sequence would be for the MD layer to find some external module's - * entry point and start by sending a QOUT_HBA_REG with info filled - * in, and the external module to call back with a QIN_HBA_REG that - * passes back the corresponding information. + * lun width capabilities of this inner layer. It's up to each + * platform to figure out how it wants to actually implement this. + * A typical sequence would be for the MD layer to find some external + * module's entry point and start by sending a QOUT_HBA_REG with info + * filled in, and the external module to call back with a QIN_HBA_REG + * that passes back the corresponding information. + * + * The r_version tag defines the version of this API. 
*/ #define QR_VERSION 18 typedef struct { - /* NB: tags from here to r_version must never change */ + /* NB: structure tags from here to r_version must never change */ void * r_identity; void (*r_action)(qact_e, void *); char r_name[8]; @@ -145,8 +159,10 @@ typedef struct { int d_count; uint64_t * d_wwpns; } fc_dlist_t; + /* - * Notify structure + * Notify structure- these are for asynchronous events that need to be sent + * as notifications to the outer layer. It should be pretty self-explanatory. */ typedef enum { NT_ABORT_TASK=0x1000, @@ -189,6 +205,8 @@ typedef struct tmd_notify { ) /* + * Lun ENABLE/DISABLE + * * A word about ENABLE/DISABLE: the argument is a pointer to a enadis_t * with en_hba, en_iid, en_chan, en_tgt and en_lun filled out. * @@ -199,106 +217,130 @@ typedef struct { void * en_private; /* for outer layer usage */ void * en_hba; /* HBA tag */ uint64_t en_iid; /* initiator ID */ - uint64_t en_tgt; /* target id */ + uint64_t en_tgt; /* target identifier */ uint16_t en_lun; /* logical unit */ uint16_t en_chan; /* channel on card */ int en_error; } enadis_t; + + /* - * Suggested Software Target Mode Command Handling structure. + * Data Transaction * - * A note about terminology: + * A tmd_xact_t is a structure used to describe a transaction within + * an overall command. It used to be part of the overall command, + * but it became desirable to allow for multiple simultaneous + * transfers for a command to happen. Generally these structures + * define data to be moved (along with the relative offset within + * the overall command) with the last structure containing status + * and sense (if needed) as well. * - * MD stands for "Machine Dependent". + * The td_cmd tag points back to the owning command. * - * This driver is structured in three layers: Outer MD, core, and inner MD. - * The latter also is bus dependent (i.e., is cognizant of PCI bus issues - * as well as platform issues). 
+ * The td_data tag points to the (platform specific) data descriptor. * + * The td_lprivate is for use by the Inner Layer for private usage. * - * "Outer Layer" means "Other Module" + * The td_xfrlen says whether this transaction is moving data- if nonzero. * - * Some additional module that actually implements SCSI target command - * policy is the recipient of incoming commands and the source of the - * disposition for them. + * The td_offset states what the relative offset within the command the + * data transfer will start at. It is undefined if td_xfrlen is zero. * - * The command structure below is one suggested possible MD command structure, - * but since the handling of thbis is entirely in the MD layer, there is - * no explicit or implicit requirement that it be used. + * The td_error flag will note any errors that occurred during an attempt + * to start this transaction. The inner layer is responsible for setting + * this. + * + * The td_hflags tag is set by the outer layer to indicate how the inner + * layer is supposed to treat this transaction. + * + * The td_lflags tag is set by the inner layer to indicate whether this + * transaction sent status and/or sense. Note that (much as it hurts), + * this API allows the inner layer to *fail* to send sense even if asked + * to- that is, AUTOSENSE is not a requirement of this API and the outer + * layer has to be prepared for this (unlikely) eventuality. 
+ */ + +typedef struct tmd_cmd tmd_cmd_t; +typedef struct tmd_xact { + tmd_cmd_t * td_cmd; /* cross-ref to tmd_cmd_t */ + void * td_data; /* data descriptor */ + void * td_lprivate; /* private for lower layer */ + uint32_t td_xfrlen; /* size of this data load */ + uint32_t td_offset; /* offset for this data load */ + int td_error; /* error with this transfer */ + uint8_t td_hflags; /* flags set by caller */ + uint8_t td_lflags; /* flags set by callee */ +} tmd_xact_t; + +#define TDFH_STSVALID 0x01 /* status is valid - include it */ +#define TDFH_SNSVALID 0x02 /* sense data (from outer layer) good - include it */ +#define TDFH_DATA_IN 0x04 /* target (us) -> initiator (them) */ +#define TDFH_DATA_OUT 0x08 /* initiator (them) -> target (us) */ +#define TDFH_DATA_MASK 0x0C /* mask to cover data direction */ +#define TDFH_PRIVATE 0xF0 /* private outer layer usage */ + +#define TDFL_SENTSTATUS 0x01 /* this transaction sent status */ +#define TDFL_SENTSENSE 0x02 /* this transaction sent sense data */ +#define TDFL_PRIVATE 0xF0 /* private inner layer usage */ + +/* + * The command structure below the SCSI Command structure that is + * is the whole point of this API. After a LUN is (or LUNS are) + * enabled, initiators who send commands addressed to the port, + * channel and lun that have been enabled cause an interrupt which + * causes the chip to receive the command and present it to the + * inner layer. The inner layer allocates one of this command + * structures and copies relevant information to it and sends it + * to the outer layer with the action QOUT_TMD_START. + * + * The outer layer is then responsible for command decode and is responsible + * for sending any transactions back (via a QIN_TMD_CONT) to the inner layer + * that (optionally) moves data and then sends closing status. + * + * The outer layer, when informed of the status of the final transaction + * then releases this structure by sending it back to the inner layer + * with the action QOUT_TMD_FIN. 
+ * + * The structure tag meanings are as described below. * * The cd_hba tag is a tag that uniquely identifies the HBA this target * mode command is coming from. The outer layer has to pass this back - * unchanged to avoid chaos. + * unchanged to avoid chaos. It is identical to the r_identity tag used + * by the inner layer to register with the outer layer. * - * The cd_iid, cd_tgt, cd_lun and cd_port tags are used to identify the - * id of the initiator who sent us a command, the target claim to be, the - * lun on the target we claim to be, and the port instance (for multiple - * port host adapters) that this applies to (consider it an extra port - * parameter). The iid, tgt and lun values are deliberately chosen to be - * fat so that, for example, World Wide Names can be used instead of - * the units that the firmware uses (in the case where the MD - * layer maintains a port database, for example). + * The cd_iid, cd_channel, cd_tgt and cd_lun tags are used to identify the + * the initiator who sent us a command, the channel on the this particular + * hardware port we arrived on (for multiple channel devices), the target we + * claim to be, and the lun on that target. * - * The cd_tagtype field specifies what kind of command tag type, if - * any, has been sent with the command. Note that the Outer Layer - * still needs to pass the tag handle through unchanged even - * if the tag type is CD_UNTAGGED. - * - * The cd_cdb contains storage for the passed in command descriptor block. - * There is no need to define length as the callee should be able to - * figure this out. - * - * The tag cd_lflags are the flags set by the MD driver when it gets - * command incoming or when it needs to inform any outside entities - * that the last requested action failed. - * - * The tag cd_hflags should be set by any outside software to indicate - * the validity of sense and status fields (defined below) and to indicate - * the direction data is expected to move. 
It is an error to have both - * CDFH_DATA_IN and CDFH_DATA_OUT set. - * - * If the CDFH_STSVALID flag is set, the command should be completed (after - * sending any data and/or status). If CDFH_SNSVALID is set and the MD layer - * can also handle sending the associated sense data (either back with an - * FCP RESPONSE IU for Fibre Channel or otherwise automatically handling a - * REQUEST SENSE from the initator for this target/lun), the MD layer will - * set the CDFL_SENTSENSE flag on successful transmission of the sense data. - * It is an error for the CDFH_SNSVALID bit to be set and CDFH_STSVALID not - * to be set. It is an error for the CDFH_SNSVALID be set and the associated - * SCSI status (cd_scsi_status) not be set to CHECK CONDITON. - * - * The tag cd_data points to a data segment to either be filled or - * read from depending on the direction of data movement. The tag - * is undefined if no data direction is set. The MD layer and outer - * layers must agree on the meaning of cd_data and it is specifically - * not defined here. + * The cd_tagval field is a tag that uniquely describes this tag. It may + * or may not have any correspondence to an underying hardware tag. The + * outer layer must pass this back unchanged or chaos will result. * * The tag cd_totlen is the total data amount expected to be moved - * over the life of the command. It may be set by the MD layer, possibly - * from the datalen field of an FCP CMND IU unit. If it shows up in the outer - * layers set to zero and the CDB indicates data should be moved, the outer - * layer should set it to the amount expected to be moved. + * for this command. This will be set to non-zero for transports + * that know this value from the transport level (e.g., Fibre Channel). + * If it shows up in the outer layers set to zero, the total data length + * must be inferred from the CDB. * - * The tag cd_xfrlen is the length of the currently active data transfer. 
- * The tag cd_offset is the current offset within the entire command that - * this data transfer starts at (this only makes sense for Fibre Channel). + * The tag cd_moved is the total amount of data moved so far. It is the + * responsibility of the inner layer to set this for every transaction and + * to keep track of it so that transport level residuals may be correctly + * set. * - * This allows several interations between any outside software and the - * MD layer to move data. It is undefined what may occur if the data - * segments are transferred out of order. + * The cd_cdb contains storage for the passed in SCSI command. * - * The reason that total length and total residual have to be tracked - * is to make sure that residual is calculated correctly. + * The cd_tagtype field specifies what kind of command tag type, if + * any, has been sent with this command. * - * The tags cd_sense and cd_scsi_status are pretty obvious and only are - * valid if CDFS_SNSVALID and CDFS_STSVALID are set. + * The tag cd_flags has some junk flags set but mostly has flags reserved for outer layer use. * - * The tag cd_error is to communicate between the MD layer and outer software - * the current error conditions. + * The tags cd_sense and cd_scsi_status are self-explanatory. * - * The tag cd_lreserved, cd_hreserved are scratch areas for use for the MD - * and outer layers respectively. + * The cd_xact tag is the first or only transaction structure related to this command. + * + * The tag cd_lreserved, cd_hreserved are scratch areas for use for the outer and inner layers respectively. 
* */ @@ -312,29 +354,6 @@ typedef struct { #define QCDS (sizeof (uint64_t)) #endif -typedef struct tmd_cmd tmd_cmd_t; -typedef struct tmd_xfr { - tmd_cmd_t * td_cmd; /* cross-referenced tmd_cmd_t */ - void * td_data; /* data descriptor */ - void * td_lprivate; /* private for lower layer */ - uint32_t td_xfrlen; /* size of this data load */ - uint32_t td_offset; /* offset for this data load */ - int td_error; /* error with this transfer or zero */ - uint8_t td_hflags; /* flags set by caller */ - uint8_t td_lflags; /* flags set by callee */ -} tmd_xfr_t; - -#define TDFL_SENTSTATUS 0x01 /* this action sent status */ -#define TDFL_SENTSENSE 0x02 /* this action sent sense data */ - -#define TDFH_STSVALID 0x01 /* status valid - include it */ -#define TDFH_SNSVALID 0x02 /* sense data (from outer layer) good - include it */ -#define TDFH_DATA_IN 0x04 /* target (us) -> initiator (them) */ -#define TDFH_DATA_OUT 0x08 /* initiator (them) -> target (us) */ -#define TDFH_DATA_MASK 0x0C /* mask to cover data direction */ -#define TDFH_BUSY 0x40 /* busy */ -#define TDFH_PRIMARY 0x80 /* within tmd */ - struct tmd_cmd { void * cd_hba; /* HBA tag */ uint64_t cd_iid; /* initiator ID */ @@ -345,12 +364,12 @@ struct tmd_cmd { uint32_t cd_moved; /* total data moved so far */ uint16_t cd_channel; /* channel index */ uint16_t cd_flags; /* flags */ - uint16_t cd_req_cnt; /* how many tmd_xfr_t's are active */ + uint16_t cd_req_cnt; /* how many tmd_xact_t's are active */ uint8_t cd_cdb[TMD_CDBLEN]; uint8_t cd_tagtype; /* tag type */ uint8_t cd_scsi_status; uint8_t cd_sense[TMD_SENSELEN]; - tmd_xfr_t cd_xfr; /* first or only transfer structure */ + tmd_xact_t cd_xact; /* first or only transaction */ union { void * ptrs[QCDS / sizeof (void *)]; /* (assume) one pointer */ uint64_t llongs[QCDS / sizeof (uint64_t)]; /* one long long */ @@ -396,54 +415,14 @@ struct tmd_cmd { memset(&(lptr)[2], 0, 6) /* - * Note that knowing the data direction and lengh at the time of receipt of - * a command from 
the initiator is a feature only of Fibre Channel. + * Inner Layer Handler Function. * - * The CDFL_BIDIR is in anticipation of the adoption of some newer - * features required by OSD. - * - * The principle selector for MD layer to know whether data is to - * be transferred in any QOUT_TMD_CONT call is cd_xfrlen- the - * flags CDFH_DATA_IN and CDFH_DATA_OUT define which direction. - */ - -/* - * A word about the START/CONT/DONE/FIN dance: - * - * When the HBA is enabled for receiving commands, one may show up - * without notice. When that happens, the MD target mode driver - * gets a tmd_cmd_t, fills it with the info that just arrived, and - * calls the outer layer with a QOUT_TMD_START code and pointer to - * the tmd_cmd_t. - * - * The outer layer decodes the command, fetches data, prepares stuff, - * whatever, and starts by passing back the pointer with a QIN_TMD_CONT - * code which causes the MD target mode driver to generate CTIOs to - * satisfy whatever action needs to be taken. When those CTIOs complete, - * the MD target driver sends the pointer to the cmd_tmd_t back with - * a QOUT_TMD_DONE code. This repeats for as long as necessary. These - * may not be done in parallel- they are sequential operations. - * - * The outer layer signals it wants to end the command by settings within - * the tmd_cmd_t itself. When the final QIN_TMD_CONT is reported completed, - * the outer layer frees the tmd_cmd_t by sending the pointer to it - * back with a QIN_TMD_FIN code. - * - * The graph looks like: - * - * QOUT_TMD_START -> [ QIN_TMD_CONT -> QOUT_TMD_DONE ] * -> QIN_TMD_FIN. - * - */ - -/* - * Target handler functions. 
- * - * The MD target handler function (the outer layer calls this) - * should be be prototyped like: + * The inner layer target handler function (the outer layer calls this) + * should be be prototyped like so: * * void target_action(qact_e, void *arg) * - * The outer layer target handler function (the MD layer calls this) + * The outer layer target handler function (the inner layer calls this) * should be be prototyped like: * * void scsi_target_handler(tact_e, void *arg) diff --git a/qla_isp/linux/isp_linux.c b/qla_isp/linux/isp_linux.c index f05824723..441d3d6d4 100644 --- a/qla_isp/linux/isp_linux.c +++ b/qla_isp/linux/isp_linux.c @@ -1,4 +1,4 @@ -/* $Id: isp_linux.c,v 1.206 2007/10/30 01:55:32 mjacob Exp $ */ +/* $Id: isp_linux.c,v 1.207 2007/11/13 01:25:50 mjacob Exp $ */ /* * Copyright (c) 1997-2007 by Matthew Jacob * All rights reserved. @@ -110,23 +110,23 @@ static char *isp_wwnns; ins->notify.nt_lreserved = hba->isp_osinfo.pending_n; \ hba->isp_osinfo.pending_n = ins -#define CALL_PARENT_XFR(hba, xfr) \ - xfr->td_lprivate = hba->isp_osinfo.pending_x; \ - hba->isp_osinfo.pending_x = xfr +#define CALL_PARENT_XFR(hba, xact) \ + xact->td_lprivate = hba->isp_osinfo.pending_x; \ + hba->isp_osinfo.pending_x = xact extern void ISP_PARENT_TARGET (qact_e, void *); static __inline tmd_cmd_t *isp_find_tmd(ispsoftc_t *, uint64_t); static __inline int isp_find_iid_wwn(ispsoftc_t *, uint32_t, uint64_t *); static __inline void isp_clear_iid_wwn(ispsoftc_t *, uint32_t, uint64_t); static void isp_taction(qact_e, void *); -static void isp_target_start_ctio(ispsoftc_t *, tmd_xfr_t *); +static void isp_target_start_ctio(ispsoftc_t *, tmd_xact_t *); static void isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); static int isp_terminate_cmd(ispsoftc_t *, tmd_cmd_t *); static void isp_handle_platform_ctio(ispsoftc_t *, void *); static int 
isp_target_putback_atio(ispsoftc_t *, tmd_cmd_t *); -static void isp_complete_ctio(ispsoftc_t *, tmd_xfr_t *); +static void isp_complete_ctio(ispsoftc_t *, tmd_xact_t *); static void isp_tgt_tq(ispsoftc_t *); #endif @@ -939,7 +939,7 @@ isp_tgt_tq(ispsoftc_t *isp) { isp_notify_t *ins; tmd_cmd_t *tmd; - tmd_xfr_t *xfr; + tmd_xact_t *xact; unsigned long flags; ISP_LOCK_SOFTC(isp); @@ -951,8 +951,8 @@ isp_tgt_tq(ispsoftc_t *isp) if (tmd) { isp->isp_osinfo.pending_t = NULL; } - xfr = isp->isp_osinfo.pending_x; - if (xfr) { + xact = isp->isp_osinfo.pending_x; + if (xact) { isp->isp_osinfo.pending_x = NULL; } ISP_UNLK_SOFTC(isp); @@ -970,11 +970,11 @@ isp_tgt_tq(ispsoftc_t *isp) ISP_PARENT_TARGET(tmd->cd_action, tmd); tmd = next; } - while (xfr != NULL) { - tmd_xfr_t *next = xfr->td_lprivate; - xfr->td_lprivate = NULL; - ISP_PARENT_TARGET(QOUT_TMD_DONE, xfr); - xfr = next; + while (xact != NULL) { + tmd_xact_t *next = xact->td_lprivate; + xact->td_lprivate = NULL; + ISP_PARENT_TARGET(QOUT_TMD_DONE, xact); + xact = next; } } @@ -1192,8 +1192,8 @@ isp_taction(qact_e action, void *arg) case QIN_TMD_CONT: { - tmd_xfr_t *xfr = arg; - tmd = xfr->td_cmd; + tmd_xact_t *xact = arg; + tmd = xact->td_cmd; isp = tmd->cd_hba; isp_target_start_ctio(isp, arg); break; @@ -1378,7 +1378,7 @@ remlun(ispsoftc_t *isp, uint16_t lun, uint16_t bus) } static void -isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) +isp_target_start_ctio(ispsoftc_t *isp, tmd_xact_t *xact) { void *qe; uint32_t handle; @@ -1386,9 +1386,9 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) uint8_t local[QENTRY_LEN]; unsigned long flags; int32_t resid; - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_cmd_t *tmd = xact->td_cmd; - xfr->td_error = 0; + xact->td_error = 0; /* * Check for commands that are already dead @@ -1396,7 +1396,7 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) if (tmd->cd_lflags & CDFL_ABORTED) { isp_prt(isp, ISP_LOGINFO, "[%llx] already ABORTED- not sending a CTIO", tmd->cd_tagval); 
dump_stack(); - xfr->td_error = -ENXIO; + xact->td_error = -ENXIO; goto out; } @@ -1405,24 +1405,24 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) * If we're sending data, we have to have one and only one data * direction set. */ - if (xfr->td_xfrlen == 0) { - if ((xfr->td_hflags & TDFH_STSVALID) == 0) { + if (xact->td_xfrlen == 0) { + if ((xact->td_hflags & TDFH_STSVALID) == 0) { isp_prt(isp, ISP_LOGERR, "CTIO, no data, and no status is wrong"); dump_stack(); - xfr->td_error = -EINVAL; + xact->td_error = -EINVAL; goto out; } } else { - if ((xfr->td_hflags & TDFH_DATA_MASK) == 0) { + if ((xact->td_hflags & TDFH_DATA_MASK) == 0) { isp_prt(isp, ISP_LOGERR, "data CTIO with no direction is wrong"); dump_stack(); - xfr->td_error = -EINVAL; + xact->td_error = -EINVAL; goto out; } - if ((xfr->td_hflags & TDFH_DATA_MASK) == TDFH_DATA_MASK) { + if ((xact->td_hflags & TDFH_DATA_MASK) == TDFH_DATA_MASK) { isp_prt(isp, ISP_LOGERR, "data CTIO with both directions is wrong (for now)"); dump_stack(); - xfr->td_error = -EINVAL; + xact->td_error = -EINVAL; goto out; } } @@ -1438,7 +1438,7 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) * Pre-increment cd_moved so we know how many bytes are actually in transit. If we actually fail to move * the bytes, we'll subtract things out when we collect status. 
*/ - tmd->cd_moved += xfr->td_xfrlen; + tmd->cd_moved += xact->td_xfrlen; /* * Set the residual to be equal to the total length less the amount previously moved plus this transfer size @@ -1480,21 +1480,21 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) } cto->ct_flags = tattr << CT7_TASK_ATTR_SHIFT; - if (xfr->td_xfrlen == 0) { + if (xact->td_xfrlen == 0) { cto->ct_flags |= CT7_FLAG_MODE1 | CT7_NO_DATA | CT7_SENDSTATUS; - if ((xfr->td_hflags & TDFH_SNSVALID) != 0) { + if ((xact->td_hflags & TDFH_SNSVALID) != 0) { cto->ct_senselen = min(TMD_SENSELEN, MAXRESPLEN); MEMCPY(cto->rsp.m1.ct_resp, tmd->cd_sense, cto->ct_senselen); cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); } } else { cto->ct_flags |= CT7_FLAG_MODE0; - if (xfr->td_hflags & TDFH_DATA_IN) { + if (xact->td_hflags & TDFH_DATA_IN) { cto->ct_flags |= CT7_DATA_IN; } else { cto->ct_flags |= CT7_DATA_OUT; } - if (xfr->td_hflags & TDFH_STSVALID) { + if (xact->td_hflags & TDFH_STSVALID) { cto->ct_flags |= CT7_SENDSTATUS; } } @@ -1508,7 +1508,7 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) } else { cto->ct_resid = 0; } - isp_prt(isp, ISP_LOGTDEBUG0, "CTIO7[%llx] scsi sts %x flags %x resid %d offset %u", tmd->cd_tagval, tmd->cd_scsi_status, cto->ct_flags, resid, xfr->td_offset); + isp_prt(isp, ISP_LOGTDEBUG0, "CTIO7[%llx] scsi sts %x flags %x resid %d offset %u", tmd->cd_tagval, tmd->cd_scsi_status, cto->ct_flags, resid, xact->td_offset); } else if (IS_FC(isp)) { ct2_entry_t *cto = (ct2_entry_t *) local; uint16_t *ssptr = NULL; @@ -1528,7 +1528,7 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) cto->ct_rxid = AT2_GET_TAG(tmd->cd_tagval); if (cto->ct_rxid == 0) { isp_prt(isp, ISP_LOGERR, "a tagval of zero is not acceptable"); - xfr->td_error = -EINVAL; + xact->td_error = -EINVAL; ISP_UNLK_SOFTC(isp); goto out; } @@ -1539,23 +1539,23 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) cto->ct_flags = CT2_FASTPOST; #endif - if (xfr->td_xfrlen == 0) { + if (xact->td_xfrlen == 0) { 
cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA | CT2_SENDSTATUS; ssptr = &cto->rsp.m1.ct_scsi_status; *ssptr = tmd->cd_scsi_status; - if ((xfr->td_hflags & TDFH_SNSVALID) != 0) { + if ((xact->td_hflags & TDFH_SNSVALID) != 0) { cto->rsp.m1.ct_senselen = min(TMD_SENSELEN, MAXRESPLEN); MEMCPY(cto->rsp.m1.ct_resp, tmd->cd_sense, cto->rsp.m1.ct_senselen); cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; } } else { cto->ct_flags |= CT2_FLAG_MODE0; - if (xfr->td_hflags & TDFH_DATA_IN) { + if (xact->td_hflags & TDFH_DATA_IN) { cto->ct_flags |= CT2_DATA_IN; } else { cto->ct_flags |= CT2_DATA_OUT; } - if (xfr->td_hflags & TDFH_STSVALID) { + if (xact->td_hflags & TDFH_STSVALID) { ssptr = &cto->rsp.m0.ct_scsi_status; cto->ct_flags |= CT2_SENDSTATUS; cto->rsp.m0.ct_scsi_status = tmd->cd_scsi_status; @@ -1597,15 +1597,15 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) if (tmd->cd_flags & CDF_NODISC) { cto->ct_flags |= CT_NODISC; } - if (xfr->td_xfrlen == 0) { + if (xact->td_xfrlen == 0) { cto->ct_flags |= CT_NO_DATA | CT_SENDSTATUS; cto->ct_scsi_status = tmd->cd_scsi_status; cto->ct_resid = 0; } else { - if (xfr->td_hflags & TDFH_STSVALID) { + if (xact->td_hflags & TDFH_STSVALID) { cto->ct_flags |= CT_SENDSTATUS; } - if (xfr->td_hflags & TDFH_DATA_IN) { + if (xact->td_hflags & TDFH_DATA_IN) { cto->ct_flags |= CT_DATA_IN; } else { cto->ct_flags |= CT_DATA_OUT; @@ -1614,11 +1614,11 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) * We assume we'll transfer what we say we'll transfer. * Otherwise, the command is dead. 
*/ - if (xfr->td_hflags & TDFH_STSVALID) { + if (xact->td_hflags & TDFH_STSVALID) { cto->ct_resid = resid; } } - isp_prt(isp, ISP_LOGTDEBUG0, "CTIO[%llx] scsi sts %x resid %d cd_lflags %x", tmd->cd_tagval, tmd->cd_scsi_status, resid, xfr->td_hflags); + isp_prt(isp, ISP_LOGTDEBUG0, "CTIO[%llx] scsi sts %x resid %d cd_lflags %x", tmd->cd_tagval, tmd->cd_scsi_status, resid, xact->td_hflags); if (cto->ct_flags & CT_SENDSTATUS) { cto->ct_flags |= CT_CCINCR; } @@ -1626,14 +1626,14 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) if (isp_getrqentry(isp, &nxti, &optr, &qe)) { isp_prt(isp, ISP_LOGWARN, "%s: request queue overflow", __FUNCTION__); - xfr->td_error = -ENOMEM; + xact->td_error = -ENOMEM; ISP_UNLK_SOFTC(isp); goto out; } - if (isp_save_xs_tgt(isp, xfr, &handle)) { + if (isp_save_xs_tgt(isp, xact, &handle)) { isp_prt(isp, ISP_LOGERR, "isp_target_start_ctio: No XFLIST pointers"); - xfr->td_error = -ENOMEM; + xact->td_error = -ENOMEM; ISP_UNLK_SOFTC(isp); goto out; } @@ -1655,7 +1655,7 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) * format. */ - switch (ISP_DMASETUP(isp, (XS_T *)xfr, (ispreq_t *) local, &nxti, optr)) { + switch (ISP_DMASETUP(isp, (XS_T *)xact, (ispreq_t *) local, &nxti, optr)) { case CMD_QUEUED: ISP_ADD_REQUEST(isp, nxti); /* @@ -1663,14 +1663,14 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) * If the CTIO fails, we still do resource replenish, but handle it at * CTIO completion time. 
*/ - if (xfr->td_hflags & TDFH_STSVALID) { + if (xact->td_hflags & TDFH_STSVALID) { tmd->cd_lflags &= ~CDFL_RESRC_FILL; } ISP_UNLK_SOFTC(isp); return; case CMD_EAGAIN: - xfr->td_error = -ENOMEM; + xact->td_error = -ENOMEM; isp_destroy_tgt_handle(isp, handle); break; @@ -1679,14 +1679,14 @@ isp_target_start_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) break; default: - xfr->td_error = -EFAULT; /* probably dma mapping failure */ + xact->td_error = -EFAULT; /* probably dma mapping failure */ isp_destroy_tgt_handle(isp, handle); break; } ISP_UNLK_SOFTC(isp); out: if ((tmd->cd_lflags & CDFL_LCL) == 0) { - CALL_PARENT_XFR(isp, xfr); + CALL_PARENT_XFR(isp, xact); } } @@ -1807,7 +1807,7 @@ isp_lcl_respond(ispsoftc_t *isp, void *aep, tmd_cmd_t *tmd) if (cdbp[0] == INQUIRY && L0LUN_TO_FLATLUN(tmd->cd_lun) == 0) { if (cdbp[1] == 0 && cdbp[2] == 0 && cdbp[3] == 0 && cdbp[5] == 0) { - tmd_xfr_t *xfr; + tmd_xact_t *xact; struct scatterlist *dp; int amt, i; @@ -1821,7 +1821,7 @@ isp_lcl_respond(ispsoftc_t *isp, void *aep, tmd_cmd_t *tmd) return; } LUN_BSET(isp->isp_osinfo.auxbmap, i); - xfr = &isp->isp_osinfo.auxinfo[i].xfr; + xact = &isp->isp_osinfo.auxinfo[i].xact; dp = &isp->isp_osinfo.auxinfo[i].sg; MEMZERO(dp, sizeof (*dp)); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) @@ -1832,24 +1832,24 @@ isp_lcl_respond(ispsoftc_t *isp, void *aep, tmd_cmd_t *tmd) #endif dp->length = DEFAULT_INQSIZE; - xfr->td_data = dp; - xfr->td_xfrlen = min(DEFAULT_INQSIZE, tmd->cd_totlen); + xact->td_data = dp; + xact->td_xfrlen = min(DEFAULT_INQSIZE, tmd->cd_totlen); if ((amt = cdbp[4]) == 0) { amt = 256; } - if (xfr->td_xfrlen > amt) { - xfr->td_xfrlen = amt; + if (xact->td_xfrlen > amt) { + xact->td_xfrlen = amt; } - xfr->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; - xfr->td_cmd = tmd; - xfr->td_offset = 0; - xfr->td_error = 0; - xfr->td_lflags = 0; + xact->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; + xact->td_cmd = tmd; + xact->td_offset = 0; + xact->td_error = 0; + xact->td_lflags = 0; tmd->cd_scsi_status 
= 0; tmd->cd_lflags |= CDFL_LCL; ISP_DROP_LK_SOFTC(isp); - isp_target_start_ctio(isp, xfr); + isp_target_start_ctio(isp, xact); ISP_IGET_LK_SOFTC(isp); return; } @@ -2206,7 +2206,7 @@ isp_terminate_cmd(ispsoftc_t *isp, tmd_cmd_t *tmd) static void isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) { - tmd_xfr_t *xfr; + tmd_xact_t *xact; tmd_cmd_t *tmd; char *ctstr; int sentstatus = 0, ok, resid = 0, id; @@ -2217,12 +2217,12 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) */ if (IS_24XX(isp)) { ct7_entry_t *ct = arg; - xfr = (tmd_xfr_t *) isp_find_xs_tgt(isp, ct->ct_syshandle); - if (xfr == NULL) { - isp_prt(isp, ISP_LOGERR, "isp_handle_platform_ctio: null xfr"); + xact = (tmd_xact_t *) isp_find_xs_tgt(isp, ct->ct_syshandle); + if (xact == NULL) { + isp_prt(isp, ISP_LOGERR, "isp_handle_platform_ctio: null xact"); return; } - tmd = xfr->td_cmd; + tmd = xact->td_cmd; isp_destroy_tgt_handle(isp, ct->ct_syshandle); status = ct->ct_nphdl; flags = ct->ct_flags; @@ -2235,12 +2235,12 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) id = ct->ct_iid_lo | (ct->ct_iid_hi << 16); } else if (IS_FC(isp)) { ct2_entry_t *ct = arg; - xfr = (tmd_xfr_t *) isp_find_xs_tgt(isp, ct->ct_syshandle); - if (xfr == NULL) { - isp_prt(isp, ISP_LOGERR, "isp_handle_platform_ctio: null xfr"); + xact = (tmd_xact_t *) isp_find_xs_tgt(isp, ct->ct_syshandle); + if (xact == NULL) { + isp_prt(isp, ISP_LOGERR, "isp_handle_platform_ctio: null xact"); return; } - tmd = xfr->td_cmd; + tmd = xact->td_cmd; isp_destroy_tgt_handle(isp, ct->ct_syshandle); status = ct->ct_status; flags = ct->ct_flags; @@ -2253,12 +2253,12 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) id = ct->ct_iid; } else { ct_entry_t *ct = arg; - xfr = (tmd_xfr_t *) isp_find_xs_tgt(isp, ct->ct_syshandle); - if (xfr == NULL) { - isp_prt(isp, ISP_LOGERR, "isp_handle_platform_ctio: null xfr"); + xact = (tmd_xact_t *) isp_find_xs_tgt(isp, ct->ct_syshandle); + if (xact == NULL) { + isp_prt(isp, ISP_LOGERR, 
"isp_handle_platform_ctio: null xact"); return; } - tmd = xfr->td_cmd; + tmd = xact->td_cmd; isp_destroy_tgt_handle(isp, ct->ct_syshandle); status = ct->ct_status; flags = ct->ct_flags; @@ -2269,7 +2269,7 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) char *sp = (char *)ct; sp += CTIO_SENSE_OFFSET; MEMCPY(tmd->cd_sense, sp, QLTM_SENSELEN); - xfr->td_lflags |= CDF_SNSVALID; + xact->td_lflags |= CDF_SNSVALID; } if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { resid = ct->ct_resid; @@ -2277,10 +2277,10 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) id = ct->ct_iid; } if (sentstatus) { - xfr->td_lflags |= TDFL_SENTSTATUS; + xact->td_lflags |= TDFL_SENTSTATUS; } - if (ok && sentstatus && (xfr->td_hflags & TDFH_SNSVALID)) { - xfr->td_lflags |= TDFL_SENTSENSE; + if (ok && sentstatus && (xact->td_hflags & TDFH_SNSVALID)) { + xact->td_lflags |= TDFL_SENTSENSE; } tmd->cd_moved -= resid; @@ -2315,7 +2315,7 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) } else { isp_prt(isp, ISP_LOGINFO, "[%llx] CTI%s ended with badstate (0x%x)", tmd->cd_tagval, cx, status); } - xfr->td_error = -EIO; + xact->td_error = -EIO; if (isp_target_putback_atio(isp, tmd)) { tmd->cd_lflags |= CDFL_RESRC_FILL; } @@ -2339,7 +2339,7 @@ isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) } } } - isp_complete_ctio(isp, xfr); + isp_complete_ctio(isp, xact); } static int @@ -2395,13 +2395,13 @@ isp_target_putback_atio(ispsoftc_t *isp, tmd_cmd_t *tmd) } static void -isp_complete_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) +isp_complete_ctio(ispsoftc_t *isp, tmd_xact_t *xact) { - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_cmd_t *tmd = xact->td_cmd; isp->isp_osinfo.cmds_completed++; if (isp->isp_osinfo.hcb || (tmd->cd_lflags & CDFL_LCL)) { if (isp->isp_osinfo.hcb == 0) { - isp_prt(isp, ISP_LOGWARN, "nobody to tell about CTIO complete, leaking xfr structure"); + isp_prt(isp, ISP_LOGWARN, "nobody to tell about CTIO complete, leaking xact structure"); MEMZERO(tmd, TMD_SIZE); if 
(isp->isp_osinfo.tfreelist) { isp->isp_osinfo.bfreelist->cd_next = tmd; @@ -2410,7 +2410,7 @@ isp_complete_ctio(ispsoftc_t *isp, tmd_xfr_t *xfr) } isp->isp_osinfo.bfreelist = tmd; } else { - CALL_PARENT_XFR(isp, xfr); + CALL_PARENT_XFR(isp, xact); } } } diff --git a/qla_isp/linux/isp_linux.h b/qla_isp/linux/isp_linux.h index c1ca39d55..6dcbb9771 100644 --- a/qla_isp/linux/isp_linux.h +++ b/qla_isp/linux/isp_linux.h @@ -1,4 +1,4 @@ -/* $Id: isp_linux.h,v 1.139 2007/10/27 18:16:29 mjacob Exp $ */ +/* $Id: isp_linux.h,v 1.142 2007/11/13 20:20:04 mjacob Exp $ */ /* * Copyright (c) 1997-2007 by Matthew Jacob * All rights reserved. @@ -261,7 +261,7 @@ struct enalun { typedef struct { struct scatterlist sg; - tmd_xfr_t xfr; + tmd_xact_t xact; } tgt_auxcmd_t; #define N_TGT_AUX 32 @@ -346,10 +346,10 @@ struct isposinfo { isp_notify_t * pending_n; /* pending list of notifies going upstream */ isp_notify_t * nfreelist; /* freelist */ isp_notify_t * npool; /* pool itself */ - struct tmd_xfr * pending_x; /* pending list of xfrs going upstream */ + struct tmd_xact * pending_x; /* pending list of xacts going upstream */ /* * When we have inquiry commands that we have to xfer data with - * locally we have to have some aux info (scatterlist, tmd_xfr_t) + * locally we have to have some aux info (scatterlist, tmd_xact_t) * to manage those commands. */ tgt_auxcmd_t auxinfo[N_TGT_AUX]; diff --git a/qla_isp/linux/isp_pci.c b/qla_isp/linux/isp_pci.c index d89fdb625..9b6aa558c 100644 --- a/qla_isp/linux/isp_pci.c +++ b/qla_isp/linux/isp_pci.c @@ -1,4 +1,4 @@ -/* $Id: isp_pci.c,v 1.139 2007/10/30 01:55:10 mjacob Exp $ */ +/* $Id: isp_pci.c,v 1.141 2007/11/13 20:20:04 mjacob Exp $ */ /* * Copyright (c) 1997-2007 by Matthew Jacob * All rights reserved. 
@@ -416,14 +416,13 @@ static __inline int map_isp_io(struct isp_pcisoftc *isp_pci, u_short cmd, vm_offset_t io_base) { if ((cmd & PCI_COMMAND_IO) && (io_base & 3) == 1) { - isp_pci->port = io_base & PCI_BASE_ADDRESS_IO_MASK; + isp_pci->port = io_base & PCI_BASE_ADDRESS_IO_MASK; request_region(isp_pci->port, 0xff, ISP_NAME); return (1); } return (0); } - void isplinux_pci_release(struct Scsi_Host *host) { @@ -1388,8 +1387,8 @@ bad: } #ifdef ISP_TARGET_MODE -static int tdma_mk(ispsoftc_t *, tmd_xfr_t *, ct_entry_t *, uint32_t *, uint32_t); -static int tdma_mkfc(ispsoftc_t *, tmd_xfr_t *, ct2_entry_t *, uint32_t *, uint32_t); +static int tdma_mk(ispsoftc_t *, tmd_xact_t *, ct_entry_t *, uint32_t *, uint32_t); +static int tdma_mkfc(ispsoftc_t *, tmd_xact_t *, ct2_entry_t *, uint32_t *, uint32_t); #define ALLOW_SYNTHETIC_CTIO 1 #ifndef ALLOW_SYNTHETIC_CTIO @@ -1415,7 +1414,7 @@ static int tdma_mkfc(ispsoftc_t *, tmd_xfr_t *, ct2_entry_t *, uint32_t *, uint3 */ static int -tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint32_t optr) +tdma_mk(ispsoftc_t *isp, tmd_xact_t *xact, ct_entry_t *cto, uint32_t *nxtip, uint32_t optr) { static const char ctx[] = "CTIO[%x] lun %d for iid%d flgs 0x%x sts 0x%x ssts 0x%x res %u %s"; struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; @@ -1428,7 +1427,7 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 tmd_cmd_t *tmd; int nth_ctio, nctios, send_status, nseg, new_seg_cnt; - tmd = xfr->td_cmd; + tmd = xact->td_cmd; curi = isp->isp_reqidx; qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); @@ -1437,7 +1436,7 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 cto->ct_header.rqs_entry_count = 1; MEMZERO(cto->ct_dataseg, sizeof (cto->ct_dataseg)); - if (xfr->td_xfrlen == 0) { + if (xact->td_xfrlen == 0) { ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); isp_prt(isp, ISP_LOGTDEBUG1, ctx, cto->ct_fwhandle, 
L0LUN_TO_FLATLUN(tmd->cd_lun), (int) cto->ct_iid, cto->ct_flags, cto->ct_status, cto->ct_scsi_status, cto->ct_resid, ""); @@ -1445,28 +1444,28 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 return (CMD_QUEUED); } - if (xfr->td_xfrlen <= 1024) { + if (xact->td_xfrlen <= 1024) { nseg = 0; - } else if (xfr->td_xfrlen <= 4096) { + } else if (xact->td_xfrlen <= 4096) { nseg = 1; - } else if (xfr->td_xfrlen <= 32768) { + } else if (xact->td_xfrlen <= 32768) { nseg = 2; - } else if (xfr->td_xfrlen <= 65536) { + } else if (xact->td_xfrlen <= 65536) { nseg = 3; - } else if (xfr->td_xfrlen <= 131372) { + } else if (xact->td_xfrlen <= 131372) { nseg = 4; - } else if (xfr->td_xfrlen <= 262144) { + } else if (xact->td_xfrlen <= 262144) { nseg = 5; - } else if (xfr->td_xfrlen <= 524288) { + } else if (xact->td_xfrlen <= 524288) { nseg = 6; } else { nseg = 7; } isp->isp_osinfo.bins[nseg]++; - sg = xfr->td_data; + sg = xact->td_data; nseg = 0; - resid = (int32_t) xfr->td_xfrlen; + resid = (int32_t) xact->td_xfrlen; while (resid > 0) { if (sg->length == 0) { isp_prt(isp, ISP_LOGWARN, "%s: zero length segment #%d for tag %llx\n", __FUNCTION__, nseg, tmd->cd_tagval); @@ -1477,7 +1476,7 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 resid -= sg->length; sg++; } - sg = xfr->td_data; + sg = xact->td_data; new_seg_cnt = pci_map_sg(pcs->pci_dev, sg, nseg, (cto->ct_flags & CT_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); @@ -1553,7 +1552,7 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 if (ISP_A64 && IS_HIGH_ISP_ADDR(addr)) { isp_prt(isp, ISP_LOGERR, "%s: 64 bit tgt mode not supported", __FUNCTION__); cto->ct_resid = -EFAULT; - pci_unmap_sg(pcs->pci_dev, xfr->td_data, nseg, (cto->ct_flags & CT_DATA_IN)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE); + pci_unmap_sg(pcs->pci_dev, xact->td_data, nseg, (cto->ct_flags & CT_DATA_IN)? 
PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE); return (CMD_COMPLETE); } /* @@ -1662,7 +1661,7 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 } } *nxtip = nxti; - isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: map %d segments at %p for handle 0x%x", tmd->cd_tagval, new_seg_cnt, xfr->td_data, cto->ct_syshandle); + isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: map %d segments at %p for handle 0x%x", tmd->cd_tagval, new_seg_cnt, xact->td_data, cto->ct_syshandle); return (CMD_QUEUED); } @@ -1693,14 +1692,14 @@ tdma_mk(ispsoftc_t *isp, tmd_xfr_t *xfr, ct_entry_t *cto, uint32_t *nxtip, uint3 * has been left unchanged. */ #ifndef ISP_DISABLE_2400_SUPPORT -static int tdma_mk_2400(ispsoftc_t *, tmd_xfr_t *, ct7_entry_t *, uint32_t *, uint32_t); +static int tdma_mk_2400(ispsoftc_t *, tmd_xact_t *, ct7_entry_t *, uint32_t *, uint32_t); static int -tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, uint32_t optr) +tdma_mk_2400(ispsoftc_t *isp, tmd_xact_t *xact, ct7_entry_t *cto, uint32_t *nxtip, uint32_t optr) { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; static const char ctx[] = "CTIO7[%llx] cdb0 0x%02x lun %u nphdl 0x%x flgs 0x%x ssts 0x%x xfr %u moved %u/%u resid %d %s"; XS_DMA_ADDR_T addr, last_synthetic_addr; - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_cmd_t *tmd = xact->td_cmd; struct scatterlist *sg; void *qe; uint16_t swd; @@ -1717,17 +1716,17 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, qe = ISP_QUEUE_ENTRY(isp->isp_rquest, curi); if (cto->ct_resid || cto->ct_scsi_status) { - level = ISP_LOGTINFO; + level = ISP_LOGTDEBUG0; } else { level = ISP_LOGTDEBUG1; } isp_prt(isp, level, ctx, (unsigned long long) tmd->cd_tagval, tmd->cd_cdb[0], L0LUN_TO_FLATLUN(tmd->cd_lun), cto->ct_nphdl, cto->ct_flags, - cto->ct_scsi_status, xfr->td_xfrlen, tmd->cd_moved, tmd->cd_totlen, cto->ct_resid, ""); + cto->ct_scsi_status, xact->td_xfrlen, tmd->cd_moved, tmd->cd_totlen, cto->ct_resid, ""); /* * Handle 
commands that transfer no data right away. */ - if (xfr->td_xfrlen == 0) { + if (xact->td_xfrlen == 0) { cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno = 1; /* ct_syshandle contains the synchronization handle set by caller */ @@ -1735,19 +1734,19 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, ISP_TDQE(isp, "tdma_mk_2400[no data]", curi, qe); return (CMD_QUEUED); } - if (xfr->td_xfrlen <= 1024) { + if (xact->td_xfrlen <= 1024) { nseg = 0; - } else if (xfr->td_xfrlen <= 4096) { + } else if (xact->td_xfrlen <= 4096) { nseg = 1; - } else if (xfr->td_xfrlen <= 32768) { + } else if (xact->td_xfrlen <= 32768) { nseg = 2; - } else if (xfr->td_xfrlen <= 65536) { + } else if (xact->td_xfrlen <= 65536) { nseg = 3; - } else if (xfr->td_xfrlen <= 131372) { + } else if (xact->td_xfrlen <= 131372) { nseg = 4; - } else if (xfr->td_xfrlen <= 262144) { + } else if (xact->td_xfrlen <= 262144) { nseg = 5; - } else if (xfr->td_xfrlen <= 524288) { + } else if (xact->td_xfrlen <= 524288) { nseg = 6; } else { nseg = 7; @@ -1760,9 +1759,9 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, * we can have descriptors that are, in fact, * longer than our data transfer count. */ - sg = xfr->td_data; + sg = xact->td_data; nseg = 0; - xfcnt = xfr->td_xfrlen; + xfcnt = xact->td_xfrlen; while (xfcnt > 0) { if (sg->length == 0) { isp_prt(isp, ISP_LOGWARN, "%s: zero length segment #%d for tag %llx\n", __FUNCTION__, nseg, tmd->cd_tagval); @@ -1773,7 +1772,7 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, xfcnt -= sg->length; sg++; } - sg = xfr->td_data; + sg = xact->td_data; new_seg_cnt = pci_map_sg(pcs->pci_dev, sg, nseg, (cto->ct_flags & CT2_DATA_IN)? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (new_seg_cnt == 0) { isp_prt(isp, ISP_LOGWARN, "%s: unable to dma map request", __FUNCTION__); @@ -1785,11 +1784,11 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, /* * Check for sequential ordering of data frames */ - if (tmd->cd_lastoff + tmd->cd_lastsize != xfr->td_offset) { - isp_prt(isp, ISP_LOGWARN, "%s: [0x%llx] lastoff %u lastsize %u but curoff %u (totlen %u)", __FUNCTION__, (unsigned long long) tmd->cd_tagval, tmd->cd_lastoff, tmd->cd_lastsize, xfr->td_offset, tmd->cd_totlen); + if (tmd->cd_lastoff + tmd->cd_lastsize != xact->td_offset) { + isp_prt(isp, ISP_LOGWARN, "%s: [0x%llx] lastoff %u lastsize %u but curoff %u (totlen %u)", __FUNCTION__, (unsigned long long) tmd->cd_tagval, tmd->cd_lastoff, tmd->cd_lastsize, xact->td_offset, tmd->cd_totlen); } - tmd->cd_lastsize = xfr->td_xfrlen; - tmd->cd_lastoff = xfr->td_offset; + tmd->cd_lastsize = xact->td_xfrlen; + tmd->cd_lastoff = xact->td_offset; /* * Second, figure out whether we'll need to send a separate status CTIO. @@ -1819,7 +1818,7 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, cto2->ct_flags |= CT7_NO_DATA|CT7_NO_DATA|CT7_FLAG_MODE1; cto2->ct_seg_count = 0; MEMZERO(&cto2->rsp, sizeof (cto2->rsp)); - if ((swd & 0xff) == SCSI_CHECK && (xfr->td_hflags & TDFH_SNSVALID)) { + if ((swd & 0xff) == SCSI_CHECK && (xact->td_hflags & TDFH_SNSVALID)) { swd |= CT2_SNSLEN_VALID; cto2->rsp.m1.ct_resplen = min(TMD_SENSELEN, MAXRESPLEN_24XX); MEMCPY(cto2->rsp.m1.ct_resp, tmd->cd_sense, cto2->rsp.m1.ct_resplen); @@ -1841,12 +1840,9 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, * Third, fill in the data segments in the first CTIO2 itself. * This is also a good place to set the relative offset. 
*/ - xfcnt = xfr->td_xfrlen; + xfcnt = xact->td_xfrlen; - /* - * cd_resid was already decremented by cd_xfrlen in isp_target_start_ctio - */ - cto->rsp.m0.reloff = xfr->td_offset; + cto->rsp.m0.reloff = xact->td_offset; seglim = 1; @@ -1900,7 +1896,7 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curip); nxti = ISP_NXT_QENTRY((curip), RQUEST_QUEUE_LEN(isp)); if (nxti == optr) { - pci_unmap_sg(pcs->pci_dev, xfr->td_data, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + pci_unmap_sg(pcs->pci_dev, xact->td_data, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: out of space for continuations (%d of %d segs done)", __FUNCTION__, cto->ct_seg_count, nseg); return (CMD_EAGAIN); } @@ -1964,7 +1960,7 @@ tdma_mk_2400(ispsoftc_t *isp, tmd_xfr_t *xfr, ct7_entry_t *cto, uint32_t *nxtip, } } while (seg < nseg || last_synthetic_count); - isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: map %d segments at %p for handle 0x%x", tmd->cd_tagval, new_seg_cnt, xfr->td_data, cto->ct_syshandle); + isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: map %d segments at %p for handle 0x%x", tmd->cd_tagval, new_seg_cnt, xact->td_data, cto->ct_syshandle); mbxsync: @@ -1978,7 +1974,7 @@ mbxsync: curi = nxti; nxti = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); if (nxti == optr) { - pci_unmap_sg(pcs->pci_dev, xfr->td_data, nseg, (cto->ct_flags & CT7_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + pci_unmap_sg(pcs->pci_dev, xact->td_data, nseg, (cto->ct_flags & CT7_DATA_IN)? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: request queue overflow", __FUNCTION__); cto->ct_resid = -EAGAIN; return (CMD_COMPLETE); @@ -2000,12 +1996,12 @@ mbxsync: #endif static int -tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, uint32_t optr) +tdma_mkfc(ispsoftc_t *isp, tmd_xact_t *xact, ct2_entry_t *cto, uint32_t *nxtip, uint32_t optr) { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; static const char ctx[] = "CTIO2[%x] lun %d for iid %d flgs 0x%x sts 0x%x ssts 0x%x res %d %s"; XS_DMA_ADDR_T addr, last_synthetic_addr; - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_cmd_t *tmd = xact->td_cmd; struct scatterlist *sg; void *qe; uint16_t swd; @@ -2021,7 +2017,7 @@ tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, ui curi = isp->isp_reqidx; qe = ISP_QUEUE_ENTRY(isp->isp_rquest, curi); if (cto->ct_flags & CT2_FASTPOST) { - if ((xfr->td_hflags & (TDFH_STSVALID|TDFH_SNSVALID)) != TDFH_STSVALID) { + if ((xact->td_hflags & (TDFH_STSVALID|TDFH_SNSVALID)) != TDFH_STSVALID) { cto->ct_flags &= ~CT2_FASTPOST; } } @@ -2029,7 +2025,7 @@ tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, ui /* * Handle commands that transfer no data right away. 
*/ - if (xfr->td_xfrlen == 0) { + if (xact->td_xfrlen == 0) { cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno = 1; /* ct_syshandle contains the synchronization handle set by caller */ @@ -2044,19 +2040,19 @@ tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, ui return (CMD_QUEUED); } - if (xfr->td_xfrlen <= 1024) { + if (xact->td_xfrlen <= 1024) { nseg = 0; - } else if (xfr->td_xfrlen <= 4096) { + } else if (xact->td_xfrlen <= 4096) { nseg = 1; - } else if (xfr->td_xfrlen <= 32768) { + } else if (xact->td_xfrlen <= 32768) { nseg = 2; - } else if (xfr->td_xfrlen <= 65536) { + } else if (xact->td_xfrlen <= 65536) { nseg = 3; - } else if (xfr->td_xfrlen <= 131372) { + } else if (xact->td_xfrlen <= 131372) { nseg = 4; - } else if (xfr->td_xfrlen <= 262144) { + } else if (xact->td_xfrlen <= 262144) { nseg = 5; - } else if (xfr->td_xfrlen <= 524288) { + } else if (xact->td_xfrlen <= 524288) { nseg = 6; } else { nseg = 7; @@ -2071,9 +2067,9 @@ tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, ui * we can have descriptors that are, in fact, * longer than our data transfer count. */ - sg = xfr->td_data; + sg = xact->td_data; nseg = 0; - xfcnt = xfr->td_xfrlen; + xfcnt = xact->td_xfrlen; while (xfcnt > 0) { if (sg->length == 0) { isp_prt(isp, ISP_LOGWARN, "%s: zero length segment #%d for tag %llx\n", __FUNCTION__, nseg, tmd->cd_tagval); @@ -2084,7 +2080,7 @@ tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, ui xfcnt -= sg->length; sg++; } - sg = xfr->td_data; + sg = xact->td_data; new_seg_cnt = pci_map_sg(pcs->pci_dev, sg, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (new_seg_cnt == 0) { isp_prt(isp, ISP_LOGWARN, "%s: unable to dma map request", __FUNCTION__); @@ -2144,8 +2140,8 @@ tdma_mkfc(ispsoftc_t *isp, tmd_xfr_t *xfr, ct2_entry_t *cto, uint32_t *nxtip, ui * Third, fill in the data segments in the first CTIO2 itself. 
* This is also a good place to set the relative offset. */ - xfcnt = xfr->td_xfrlen; - cto->ct_reloff = xfr->td_offset; + xfcnt = xact->td_xfrlen; + cto->ct_reloff = xact->td_offset; /* * This is a good place to return to if we need to redo this with @@ -2181,9 +2177,9 @@ again: if (cto2) { cto2->ct_header.rqs_entry_type = RQSTYPE_CTIO3; } - xfcnt = xfr->td_xfrlen; + xfcnt = xact->td_xfrlen; cto->rsp.m0.ct_xfrlen = 0; - sg = xfr->td_data; + sg = xact->td_data; seglim = ISP_RQDSEG_T3; isp_prt(isp, ISP_LOGTDEBUG2, "%s: found hi page", __FUNCTION__); goto again; @@ -2254,7 +2250,7 @@ again: qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curip); nxti = ISP_NXT_QENTRY((curip), RQUEST_QUEUE_LEN(isp)); if (nxti == optr) { - pci_unmap_sg(pcs->pci_dev, xfr->td_data, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + pci_unmap_sg(pcs->pci_dev, xact->td_data, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: out of space for continuations (%d of %d segs done)", __FUNCTION__, cto->ct_seg_count, nseg); return (CMD_EAGAIN); } @@ -2325,13 +2321,13 @@ again: if (ISP_A64 && IS_HIGH_ISP_ADDR(addr)) { nxti = *nxtip; cto->ct_header.rqs_entry_count = 1; - xfcnt = xfr->td_xfrlen; + xfcnt = xact->td_xfrlen; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; if (cto2) { cto2->ct_header.rqs_entry_type = RQSTYPE_CTIO3; } cto->rsp.m0.ct_xfrlen = 0; - sg = xfr->td_data; + sg = xact->td_data; seglim = ISP_RQDSEG_T3; isp_prt(isp, ISP_LOGTDEBUG1, "%s: found hi page in continuation, restarting", __FUNCTION__); goto again; @@ -2350,7 +2346,7 @@ again: } } while (seg < nseg || last_synthetic_count); - isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: map %d segments at %p for handle 0x%x", tmd->cd_tagval, new_seg_cnt, xfr->td_data, cto->ct_syshandle); + isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: map %d segments at %p for handle 0x%x", tmd->cd_tagval, new_seg_cnt, xact->td_data, cto->ct_syshandle); mbxsync: @@ -2364,7 
+2360,7 @@ mbxsync: curi = nxti; nxti = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); if (nxti == optr) { - pci_unmap_sg(pcs->pci_dev, xfr->td_data, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + pci_unmap_sg(pcs->pci_dev, xact->td_data, nseg, (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: request queue overflow", __FUNCTION__); cto->ct_resid = -EAGAIN; return (CMD_COMPLETE); @@ -2400,9 +2396,9 @@ isp_pci_dmasetup(ispsoftc_t *isp, Scsi_Cmnd *Cmnd, ispreq_t *rq, uint32_t *nxi, rq->req_header.rqs_entry_type == RQSTYPE_CTIO3) { int s; if (IS_FC(isp)) { - s = tdma_mkfc(isp, (tmd_xfr_t *)Cmnd, (ct2_entry_t *)rq, nxi, optr); + s = tdma_mkfc(isp, (tmd_xact_t *)Cmnd, (ct2_entry_t *)rq, nxi, optr); } else { - s = tdma_mk(isp, (tmd_xfr_t *)Cmnd, (ct_entry_t *)rq, nxi, optr); + s = tdma_mk(isp, (tmd_xact_t *)Cmnd, (ct_entry_t *)rq, nxi, optr); } return (s); } @@ -2730,7 +2726,7 @@ isp_pci_2400_dmasetup(ispsoftc_t *isp, Scsi_Cmnd *Cmnd, ispreq_t *orig_rq, uint3 #ifdef ISP_TARGET_MODE if (orig_rq->req_header.rqs_entry_type == RQSTYPE_CTIO7) { - return tdma_mk_2400(isp, (tmd_xfr_t *)Cmnd, (ct7_entry_t *)orig_rq, nxi, optr); + return tdma_mk_2400(isp, (tmd_xact_t *)Cmnd, (ct7_entry_t *)orig_rq, nxi, optr); } #endif rq = (ispreqt7_t *) orig_rq; @@ -2922,12 +2918,12 @@ isp_pci_dmateardown(ispsoftc_t *isp, Scsi_Cmnd *Cmnd, uint32_t handle) * safest way to keep the two w/o redoing our internal apis. */ if (IS_TARGET_HANDLE(handle)) { - tmd_xfr_t *xfr = (tmd_xfr_t *) Cmnd; - tmd_cmd_t *tmd = xfr? xfr->td_cmd : NULL; + tmd_xact_t *xact = (tmd_xact_t *) Cmnd; + tmd_cmd_t *tmd = xact? xact->td_cmd : NULL; int nseg = tmd? tmd->cd_nseg : 0; - if (nseg && xfr->td_data) { - isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: pci_unmap %d segments at %p for handle 0x%x", tmd->cd_tagval, nseg, xfr->td_data, handle); - pci_unmap_sg(pcs->pci_dev, xfr->td_data, nseg, (xfr->td_hflags & TDFH_DATA_IN)? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + if (nseg && xact->td_data) { + isp_prt(isp, ISP_LOGTDEBUG2, "[%llx]: pci_unmap %d segments at %p for handle 0x%x", tmd->cd_tagval, nseg, xact->td_data, handle); + pci_unmap_sg(pcs->pci_dev, xact->td_data, nseg, (xact->td_hflags & TDFH_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); } } else #endif diff --git a/qla_isp/linux/isp_scst.c b/qla_isp/linux/isp_scst.c index 4deae6627..da069131d 100644 --- a/qla_isp/linux/isp_scst.c +++ b/qla_isp/linux/isp_scst.c @@ -441,7 +441,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) bus_t *bp; ini_t *ini; int ret; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; /* * First, find the bus. @@ -516,10 +516,10 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) err: tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_hflags |= TDFH_STSVALID; - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_xfrlen = 0; - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + xact->td_hflags |= TDFH_STSVALID; + xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; + (*bp->h.r_action)(QIN_TMD_CONT, xact); return; } @@ -528,10 +528,10 @@ scsi_target_done_cmd(tmd_cmd_t *tmd, int from_intr) { bus_t *bp; struct scst_cmd *scst_cmd; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; SDprintk2("scsi_target: TMD_DONE[%llx] %p hf %x lf %x xfrlen %d totlen %d moved %d\n", - tmd->cd_tagval, tmd, xfr->td_hflags, xfr->td_lflags, xfr->td_xfrlen, tmd->cd_totlen, tmd->cd_moved); + tmd->cd_tagval, tmd, xact->td_hflags, xact->td_lflags, xact->td_xfrlen, tmd->cd_totlen, tmd->cd_moved); bp = tmd->cd_bus; scst_cmd = tmd->cd_scst_cmd; @@ -542,29 +542,29 @@ scsi_target_done_cmd(tmd_cmd_t *tmd, int from_intr) return; } - if (xfr->td_hflags & TDFH_STSVALID) { - if (xfr->td_hflags & TDFH_DATA_IN) { - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_xfrlen = 0; + if (xact->td_hflags & TDFH_STSVALID) { + if (xact->td_hflags & TDFH_DATA_IN) { + xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; } - if 
(xfr->td_error) { + if (xact->td_error) { scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED); } scst_tgt_cmd_done(scst_cmd); return; } - if (xfr->td_hflags & TDFH_DATA_OUT) { + if (xact->td_hflags & TDFH_DATA_OUT) { if (tmd->cd_totlen == tmd->cd_moved) { - if (xfr->td_xfrlen) { + if (xact->td_xfrlen) { int rx_status = SCST_RX_STATUS_SUCCESS; - if (xfr->td_error) { + if (xact->td_error) { rx_status = SCST_RX_STATUS_ERROR; } scst_rx_data(scst_cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_TASKLET); } else { - if (xfr->td_error) { + if (xact->td_error) { scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED); } scst_tgt_cmd_done(scst_cmd); @@ -572,10 +572,10 @@ scsi_target_done_cmd(tmd_cmd_t *tmd, int from_intr) } else { ; /* we don't have all data, do nothing */ } - } else if (xfr->td_hflags & TDFH_DATA_IN) { - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_xfrlen = 0; - if (xfr->td_error) { + } else if (xact->td_hflags & TDFH_DATA_IN) { + xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; + if (xact->td_error) { scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED); } scst_tgt_cmd_done(scst_cmd); @@ -719,14 +719,14 @@ scsi_target_handler(qact_e action, void *arg) tmd_cmd_t *tmd = arg; SDprintk2("scsi_target: TMD_START[%llx] %p cdb0=%x\n", tmd->cd_tagval, tmd, tmd->cd_cdb[0] & 0xff); - tmd->cd_xfr.td_cmd = tmd; + tmd->cd_xact.td_cmd = tmd; scsi_target_start_cmd(arg, 1); break; } case QOUT_TMD_DONE: { - tmd_xfr_t *xfr = arg; - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_xact_t *xact = arg; + tmd_cmd_t *tmd = xact->td_cmd; SDprintk2("scsi_target: TMD_DONE[%llx] %p cdb0=%x\n", tmd->cd_tagval, tmd, tmd->cd_cdb[0] & 0xff); scsi_target_done_cmd(tmd, 1); @@ -886,15 +886,15 @@ isp_rdy_to_xfer(struct scst_cmd *scst_cmd) if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_WRITE) { tmd_cmd_t *tmd = (tmd_cmd_t *) scst_cmd_get_tgt_priv(scst_cmd); - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; - xfr->td_hflags |= TDFH_DATA_OUT; - 
xfr->td_data = scst_cmd_get_sg(scst_cmd); - xfr->td_xfrlen = scst_cmd_get_bufflen(scst_cmd); + xact->td_hflags |= TDFH_DATA_OUT; + xact->td_data = scst_cmd_get_sg(scst_cmd); + xact->td_xfrlen = scst_cmd_get_bufflen(scst_cmd); SDprintk("%s: write nbytes %u\n", __FUNCTION__, scst_cmd_get_bufflen(scst_cmd)); bp = tmd->cd_bus; - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + (*bp->h.r_action)(QIN_TMD_CONT, xact); } return (0); @@ -917,7 +917,7 @@ isp_xmit_response(struct scst_cmd *scst_cmd) { tmd_cmd_t *tmd = (tmd_cmd_t *) scst_cmd_get_tgt_priv(scst_cmd); bus_t *bp = tmd->cd_bus; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_READ) { unsigned int len = scst_cmd_get_resp_data_len(scst_cmd); @@ -929,22 +929,22 @@ isp_xmit_response(struct scst_cmd *scst_cmd) dump_stack(); memcpy(tmd->cd_sense, ifailure, TMD_SENSELEN); - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; tmd->cd_scsi_status = SCSI_CHECK; goto out; } else { - xfr->td_hflags |= TDFH_DATA_IN; - xfr->td_xfrlen = len; - xfr->td_data = scst_cmd_get_sg(scst_cmd); + xact->td_hflags |= TDFH_DATA_IN; + xact->td_xfrlen = len; + xact->td_data = scst_cmd_get_sg(scst_cmd); } } else { /* finished write to target or command with no data */ - xfr->td_xfrlen = 0; - xfr->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; + xact->td_hflags &= ~TDFH_DATA_MASK; } if (scst_cmd_get_tgt_resp_flags(scst_cmd) & SCST_TSC_FLAG_STATUS) { - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; tmd->cd_scsi_status = scst_cmd_get_status(scst_cmd); if (tmd->cd_scsi_status == SCSI_CHECK) { @@ -962,13 +962,13 @@ isp_xmit_response(struct scst_cmd *scst_cmd) } out: - if ((xfr->td_hflags & TDFH_STSVALID) && (tmd->cd_scsi_status == SCSI_CHECK)) { - xfr->td_xfrlen = 0; - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_hflags |= TDFH_SNSVALID; + if ((xact->td_hflags & TDFH_STSVALID) && (tmd->cd_scsi_status == SCSI_CHECK)) { + xact->td_xfrlen = 0; 
+ xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_hflags |= TDFH_SNSVALID; } - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + (*bp->h.r_action)(QIN_TMD_CONT, xact); return (0); } @@ -977,9 +977,9 @@ isp_on_free_cmd(struct scst_cmd *scst_cmd) { tmd_cmd_t *tmd = (tmd_cmd_t *) scst_cmd_get_tgt_priv(scst_cmd); bus_t *bp = tmd->cd_bus; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; - xfr->td_data = NULL; + xact->td_data = NULL; SDprintk("%s: TMD_FIN[%llx]\n", __FUNCTION__, tmd->cd_tagval); (*bp->h.r_action)(QIN_TMD_FIN, tmd); } diff --git a/qla_isp/linux/scsi_target.c b/qla_isp/linux/scsi_target.c index 2404d93b5..a61e28eac 100644 --- a/qla_isp/linux/scsi_target.c +++ b/qla_isp/linux/scsi_target.c @@ -1,4 +1,4 @@ -/* $Id: scsi_target.c,v 1.71 2007/10/31 05:28:28 mjacob Exp $ */ +/* $Id: scsi_target.c,v 1.72 2007/11/13 01:25:50 mjacob Exp $ */ /* * Copyright (c) 1997-2007 by Matthew Jacob * All rights reserved. @@ -140,6 +140,9 @@ #ifndef WRITE_16 #define WRITE_16 0x8a #endif +#ifndef REPORT_LUNS +#define REPORT_LUNS 0xa0 +#endif #define MODE_ALL_PAGES 0x3f #define MODE_VU_PAGE 0x00 @@ -169,7 +172,7 @@ /* * Size to allocate both a scatterlist + payload for small allocations */ -#define SGS_SIZE 512 +#define SGS_SIZE 1024 #define SGS0 (roundup(sizeof (struct scatterlist), sizeof (void *))) #define SGS_PAYLOAD_SIZE (SGS_SIZE - SGS0) #define SGS_SGP(x) ((struct scatterlist *)&((u8 *)(x))[SGS_PAYLOAD_SIZE]) @@ -382,7 +385,10 @@ static uint8_t ua[TMD_SENSELEN] = { 0xf0, 0, 0x6, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x29, 0x1 }; static uint8_t nosense[TMD_SENSELEN] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + 0xf0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; +static uint8_t invchg[TMD_SENSELEN] = { + 0xf0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3f, 0x0e }; static bus_t busses[MAX_BUS]; @@ -644,7 +650,7 @@ static void scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) { unsigned long flags; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; bus_t *bp; void *addr; 
ini_t *ini; @@ -699,10 +705,10 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) if (nptr == NULL) { spin_unlock_irqrestore(&scsi_target_lock, flags); tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_hflags |= TDFH_STSVALID; - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_xfrlen = 0; - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + xact->td_hflags |= TDFH_STSVALID; + xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; + (*bp->h.r_action)(QIN_TMD_CONT, xact); return; } add_ini(bp, tmd->cd_iid, nptr); @@ -748,7 +754,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) return; } if (tmd->cd_totlen == 0) { - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; goto doit; } len = min(tmd->cd_totlen, tmd->cd_cdb[4]); @@ -757,7 +763,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) if (addr == NULL) { printk(KERN_WARNING "scsi_target_alloc: out of memory for inquiry data\n"); add_sdata(ini, enomem); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; goto doit; } buf = addr; @@ -801,7 +807,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) default: scsi_target_kfree(addr, SGS_SIZE); add_sdata(ini, invfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; goto doit; } } else { @@ -812,12 +818,12 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) } if (len == 0) { scsi_target_kfree(addr, SGS_SIZE); - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; } else { init_sg_elem(dp, NULL, 0, addr, len); - xfr->td_xfrlen = dp->length; - xfr->td_data = dp; - xfr->td_hflags |= TDFH_STSVALID|TDFH_DATA_IN; + xact->td_xfrlen = dp->length; + xact->td_data = dp; + xact->td_hflags |= TDFH_STSVALID|TDFH_DATA_IN; tmd->cd_flags |= CDF_PRIVATE_0; /* * If we're not here, say we aren't here. 
@@ -830,17 +836,17 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) } else { SDprintk2("scsi_target(%s%d): illegal field for inquiry data\n", bp->h.r_name, bp->h.r_inst); add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; } goto doit; } if (tmd->cd_cdb[0] == REQUEST_SENSE) { struct scatterlist *dp = NULL; - xfr->td_xfrlen = TMD_SENSELEN; - xfr->td_xfrlen = min(tmd->cd_cdb[4], xfr->td_xfrlen); - xfr->td_xfrlen = min(tmd->cd_totlen, xfr->td_xfrlen); - if (xfr->td_xfrlen != 0) { + xact->td_xfrlen = TMD_SENSELEN; + xact->td_xfrlen = min(tmd->cd_cdb[4], xact->td_xfrlen); + xact->td_xfrlen = min(tmd->cd_totlen, xact->td_xfrlen); + if (xact->td_xfrlen != 0) { if (from_intr) { scsi_cmd_sched_restart(tmd, "REQUEST_SENSE"); return; @@ -849,7 +855,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) if (addr == NULL) { printk("scsi_target_alloc: out of memory for sense data\n"); tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_xfrlen = 0; + xact->td_xfrlen = 0; } else { dp = SGS_SGP(addr); init_sg_elem(dp, NULL, 0, addr, TMD_SENSELEN); @@ -859,15 +865,73 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) memcpy(addr, ini->ini_sdata->sdata, TMD_SENSELEN); rem_sdata(ini); } - xfr->td_data = dp; - xfr->td_hflags |= TDFH_DATA_IN; + xact->td_data = dp; + xact->td_hflags |= TDFH_DATA_IN; tmd->cd_flags |= CDF_PRIVATE_0; SDprintk2("sense data in scsi_target for %s%d: %p (%p) len %d, key/asc/ascq 0x%x/0x%x/0x%x\n", bp->h.r_name, bp->h.r_inst, addr, dp, dp->length, ((u8 *)addr)[2]&0xf, ((u8 *)addr)[12]&0xff, ((u8 *)addr)[13]); } } - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; + goto doit; + } + + if (tmd->cd_cdb[0] == REPORT_LUNS) { + struct scatterlist *dp = NULL; + if (tmd->cd_totlen != 0) { + if (from_intr) { + scsi_cmd_sched_restart(tmd, "REPORT_LUNS"); + return; + } + addr = scsi_target_kzalloc(SGS_SIZE, GFP_KERNEL|GFP_ATOMIC); + if (addr == NULL) { + printk("scsi_target_alloc: out of memory for report 
luns\n"); + tmd->cd_scsi_status = SCSI_BUSY; + xact->td_xfrlen = 0; + } else { + int i; + uint32_t lim, nluns; + uint8_t *rpa = addr; + + lim = (tmd->cd_cdb[6] << 24) | (tmd->cd_cdb[7] << 16) | (tmd->cd_cdb[8] << 8) | tmd->cd_cdb[9]; + + spin_lock_irqsave(&scsi_target_lock, flags); + for (nluns = i = 0; i < MAX_LUN; i++) { + lun_t *lp = &bp->luns[i]; + if (lp->enabled) { + uint8_t *ptr = &rpa[8 + (nluns << 3)]; + if (i >= 256) { + ptr[0] = 0x40 | ((i >> 8) & 0x3f); + } + ptr[1] = i; + nluns++; + } + } + spin_unlock_irqrestore(&scsi_target_lock, flags); + + /* + * Make sure we always have *one* (lun 0) enabled + */ + if (nluns == 0) { + nluns = 1; + } + rpa[0] = (nluns << 3) >> 24; + rpa[1] = (nluns << 3) >> 16; + rpa[2] = (nluns << 3) >> 8; + rpa[3] = (nluns << 3); + + dp = SGS_SGP(addr); + lim = min(lim, tmd->cd_totlen); + lim = min(lim, (nluns << 3) + 8); + init_sg_elem(dp, NULL, 0, addr, lim); + xact->td_xfrlen = dp->length; + xact->td_data = dp; + xact->td_hflags |= TDFH_DATA_IN; + tmd->cd_flags |= CDF_PRIVATE_0; + } + } + xact->td_hflags |= TDFH_STSVALID; goto doit; } @@ -880,7 +944,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) return; } add_sdata(ini, nolun); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; goto doit; } @@ -888,7 +952,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) * All other commands first check for Contingent Allegiance */ if (ini->ini_sdata) { - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; goto doit; } @@ -899,7 +963,7 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) case SYNCHRONIZE_CACHE: case START_STOP: case TEST_UNIT_READY: - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; break; case READ_CAPACITY: if (from_intr) { @@ -943,14 +1007,14 @@ scsi_target_start_cmd(tmd_cmd_t *tmd, int from_intr) return; } add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; break; } doit: - if (xfr->td_hflags & 
TDFH_SNSVALID) { + if (xact->td_hflags & TDFH_SNSVALID) { tmd->cd_scsi_status = SCSI_CHECK; - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; if (ini && ini->ini_sdata) { memcpy(tmd->cd_sense, ini->ini_sdata->sdata, TMD_SENSELEN); } else { @@ -960,9 +1024,9 @@ doit: tmd->cd_tagval, tmd->cd_cdb[0] & 0xff, tmd->cd_totlen, tmd->cd_sense[2] & 0xf, tmd->cd_sense[12], tmd->cd_sense[13]); } else { SDprintk("INI(%#llx)=>LUN %d: [%llx] cdb0=0x%02x tl=%u ssts=%x hf 0x%x\n", tmd->cd_iid, L0LUN_TO_FLATLUN(tmd->cd_lun), - tmd->cd_tagval, tmd->cd_cdb[0] & 0xff, tmd->cd_totlen, tmd->cd_scsi_status, xfr->td_hflags); + tmd->cd_tagval, tmd->cd_cdb[0] & 0xff, tmd->cd_totlen, tmd->cd_scsi_status, xact->td_hflags); } - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + (*bp->h.r_action)(QIN_TMD_CONT, xact); } static void @@ -971,7 +1035,7 @@ scsi_target_read_capacity_16(tmd_cmd_t *tmd, ini_t *ini) bus_t *bp; void *addr; struct scatterlist *dp; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; lun_t *lp; bp = ini->ini_bus; @@ -980,7 +1044,7 @@ scsi_target_read_capacity_16(tmd_cmd_t *tmd, ini_t *ini) if (addr == NULL) { printk(KERN_WARNING "scsi_target_read_capacity: alloc failed\n"); tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; return; } @@ -1002,7 +1066,7 @@ scsi_target_read_capacity_16(tmd_cmd_t *tmd, ini_t *ini) tmd->cd_cdb[6] || tmd->cd_cdb[7] || tmd->cd_cdb[8] || tmd->cd_cdb[9]) { scsi_target_kfree(addr, SGS_SIZE); add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; return; } ((u8 *)addr)[0] = (blks >> 56) & 0xff; @@ -1019,9 +1083,9 @@ scsi_target_read_capacity_16(tmd_cmd_t *tmd, ini_t *ini) ((u8 *)addr)[10] = ((1 << LUN_BLOCK_SHIFT) >> 8) & 0xff; ((u8 *)addr)[11] = ((1 << LUN_BLOCK_SHIFT)) & 0xff; init_sg_elem(dp, NULL, 0, addr, min(32, tmd->cd_totlen)); - xfr->td_xfrlen = dp->length; - xfr->td_data = dp; - xfr->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; 
+ xact->td_xfrlen = dp->length; + xact->td_data = dp; + xact->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; tmd->cd_flags |= CDF_PRIVATE_0; } @@ -1031,7 +1095,7 @@ scsi_target_read_capacity(tmd_cmd_t *tmd, ini_t *ini) bus_t *bp; void *addr; struct scatterlist *dp; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; lun_t *lp; bp = ini->ini_bus; @@ -1040,7 +1104,7 @@ scsi_target_read_capacity(tmd_cmd_t *tmd, ini_t *ini) if (addr == NULL) { printk(KERN_WARNING "scsi_target_read_capacity: alloc failed\n"); tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; return; } @@ -1057,7 +1121,7 @@ scsi_target_read_capacity(tmd_cmd_t *tmd, ini_t *ini) if (tmd->cd_cdb[2] || tmd->cd_cdb[3] || tmd->cd_cdb[4] || tmd->cd_cdb[5]) { scsi_target_kfree(addr, SGS_SIZE); add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; return; } if (blks < 0xffffffffull) { @@ -1077,9 +1141,9 @@ scsi_target_read_capacity(tmd_cmd_t *tmd, ini_t *ini) ((u8 *)addr)[6] = ((1 << LUN_BLOCK_SHIFT) >> 8) & 0xff; ((u8 *)addr)[7] = ((1 << LUN_BLOCK_SHIFT)) & 0xff; init_sg_elem(dp, NULL, 0, addr, min(8, tmd->cd_totlen)); - xfr->td_xfrlen = dp->length; - xfr->td_data = dp; - xfr->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; + xact->td_xfrlen = dp->length; + xact->td_data = dp; + xact->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; tmd->cd_flags |= CDF_PRIVATE_0; } @@ -1089,7 +1153,7 @@ scsi_target_modesense(tmd_cmd_t *tmd, ini_t *ini) bus_t *bp; lun_t *lp; int dlen, pgctl, page; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; struct scatterlist *dp; uint8_t *pgdata; uint32_t nblks; @@ -1112,7 +1176,7 @@ scsi_target_modesense(tmd_cmd_t *tmd, ini_t *ini) break; default: add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; return; } @@ -1120,7 +1184,7 @@ scsi_target_modesense(tmd_cmd_t *tmd, ini_t *ini) if (addr == NULL) { printk(KERN_WARNING 
"scsi_target_modesense: alloc failure\n"); tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; return; } dp = SGS_SGP(addr); @@ -1247,9 +1311,9 @@ scsi_target_modesense(tmd_cmd_t *tmd, ini_t *ini) dlen = min(tmd->cd_cdb[4], tmd->cd_totlen); dlen = min(dlen, SGS_PAYLOAD_SIZE); init_sg_elem(dp, NULL, 0, addr, dlen); - xfr->td_xfrlen = dp->length; - xfr->td_data = dp; - xfr->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; + xact->td_xfrlen = dp->length; + xact->td_data = dp; + xact->td_hflags |= TDFH_DATA_IN|TDFH_STSVALID; tmd->cd_flags |= CDF_PRIVATE_0; } @@ -1262,7 +1326,7 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) uint64_t lba, devoff; uint32_t transfer_count, byte_count, count, first_offset; struct scatterlist *dp; - tmd_xfr_t *xfr = &tmd->cd_xfr; + tmd_xact_t *xact = &tmd->cd_xact; int iswrite, page_idx, list_idx, sgidx; unsigned long flags; @@ -1334,7 +1398,7 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) return (-1); } add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; return (0); } @@ -1346,13 +1410,13 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) printk(KERN_WARNING "scsi_target: overflow devoff (0x%llx) + count (0x%llx) > limit (0x%llx)\n", (unsigned long long) devoff, (unsigned long long)(((uint64_t)transfer_count) << LUN_BLOCK_SHIFT), (unsigned long long) lp->nbytes); add_sdata(ini, illfld); - xfr->td_hflags |= TDFH_SNSVALID; + xact->td_hflags |= TDFH_SNSVALID; return (0); } if (unlikely(transfer_count == 0)) { printk(KERN_WARNING "%s: zero length transfer count\n", __FUNCTION__); - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; return (0); } @@ -1365,7 +1429,7 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) byte_count &= ~((1 << LUN_BLOCK_SHIFT) - 1); if (byte_count == 0) { printk(KERN_WARNING "%s: byte count less than a block\n", __FUNCTION__); - xfr->td_hflags |= TDFH_STSVALID; 
+ xact->td_hflags |= TDFH_STSVALID; return (0); } transfer_count = byte_count >> LUN_BLOCK_SHIFT; @@ -1409,7 +1473,7 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) if (dp == NULL) { printk(KERN_WARNING "unable to allocate %d entry scatterlist\n", tmd->cd_nsgelems); tmd->cd_scsi_status = SCSI_BUSY; - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; return (0); } } @@ -1485,11 +1549,11 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) page_idx = 0; if (++list_idx >= lp->npglists) { printk(KERN_WARNING "bad list_idx for block %lld\n", lba); - xfr->td_data = dp; + xact->td_data = dp; tmd->cd_dp = dp; - xfr->td_xfrlen = 0; + xact->td_xfrlen = 0; add_sdata(ini, ifailure); - xfr->td_hflags |= TDFH_SNSVALID|TDFH_STSVALID; + xact->td_hflags |= TDFH_SNSVALID|TDFH_STSVALID; tmd->cd_flags |= CDF_PRIVATE_1; return (0); } @@ -1499,22 +1563,22 @@ scsi_target_rdwr(tmd_cmd_t *tmd, ini_t *ini, int from_intr) } out: - xfr->td_xfrlen = byte_count; - xfr->td_data = dp; + xact->td_xfrlen = byte_count; + xact->td_data = dp; tmd->cd_dp = dp; tmd->cd_flags |= CDF_PRIVATE_1; if (iswrite) { - xfr->td_hflags |= TDFH_DATA_OUT; + xact->td_hflags |= TDFH_DATA_OUT; /* * WCE is set, or we're *not* an overcommit disk, * the command is done as soon as data lands * in memory. 
*/ if (/* lp->wce || */ lp->overcommit == 0) { - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; } } else { - xfr->td_hflags |= TDFH_DATA_IN; + xact->td_hflags |= TDFH_DATA_IN; /* * If we're an overcommit disk, then we don't do * anything with this command yet- we put it on @@ -1539,32 +1603,32 @@ out: spin_unlock_irqrestore(&scsi_target_lock, flags); return (1); } else { - xfr->td_hflags |= TDFH_STSVALID; + xact->td_hflags |= TDFH_STSVALID; } } return (0); } static int -scsi_target_ldfree(bus_t *bp, tmd_xfr_t *xfr, int from_intr) +scsi_target_ldfree(bus_t *bp, tmd_xact_t *xact, int from_intr) { int i; unsigned long flags; - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_cmd_t *tmd = xact->td_cmd; if (tmd->cd_flags & CDF_PRIVATE_0) { - struct scatterlist *dp = xfr->td_data; + struct scatterlist *dp = xact->td_data; if (from_intr) { goto resched; } - SDprintk("scsi_target: LDFREE[%llx] %p xfr->td_data %p\n", tmd->cd_tagval, tmd, dp); + SDprintk("scsi_target: LDFREE[%llx] %p xact->td_data %p\n", tmd->cd_tagval, tmd, dp); if (dp) { scsi_target_kfree(page_address(dp->page) + dp->offset, SGS_SIZE); } else { printk(KERN_ERR "scsi_target: LDFREE[%llx] null dp @ line %d\n", tmd->cd_tagval, __LINE__); return (0); } - xfr->td_data = NULL; + xact->td_data = NULL; tmd->cd_flags &= ~CDF_PRIVATE_0; } else if (tmd->cd_flags & CDF_PRIVATE_1) { struct scatterlist *dp = tmd->cd_dp; @@ -1608,7 +1672,7 @@ scsi_target_ldfree(bus_t *bp, tmd_xfr_t *xfr, int from_intr) spin_unlock_irqrestore(&scsi_target_lock, flags); scsi_target_kfree(dp, tmd->cd_nsgelems * sizeof (struct scatterlist)); } - xfr->td_data = NULL; + xact->td_data = NULL; tmd->cd_flags &= ~CDF_PRIVATE_1; } return (1); @@ -1682,14 +1746,14 @@ scsi_target_handler(qact_e action, void *arg) SDprintk2("scsi_target: TMD_START[%llx] %p cdb0=%x\n", tmd->cd_tagval, tmd, tmd->cd_cdb[0] & 0xff); - tmd->cd_xfr.td_cmd = tmd; + tmd->cd_xact.td_cmd = tmd; scsi_target_start_cmd(tmd, 1); break; } case QOUT_TMD_DONE: { - tmd_xfr_t 
*xfr = arg; - tmd_cmd_t *tmd = xfr->td_cmd; + tmd_xact_t *xact = arg; + tmd_cmd_t *tmd = xact->td_cmd; ini_t *nptr; bp = bus_from_tmd(tmd); @@ -1698,14 +1762,14 @@ scsi_target_handler(qact_e action, void *arg) break; } - SDprintk2("scsi_target: TMD_DONE[%llx] %p hf %x lf %x\n", tmd->cd_tagval, tmd, xfr->td_hflags, xfr->td_lflags); + SDprintk2("scsi_target: TMD_DONE[%llx] %p hf %x lf %x\n", tmd->cd_tagval, tmd, xact->td_hflags, xact->td_lflags); /* * Okay- were we moving data? If so, deal with the result. * * If so, check to see if we sent it. */ - if (xfr->td_hflags & TDFH_DATA_OUT) { + if (xact->td_hflags & TDFH_DATA_OUT) { lun_t *lp; SDprintk("scsi_target: [%llx] data receive done\n", tmd->cd_tagval); spin_lock_irqsave(&scsi_target_lock, flags); @@ -1716,8 +1780,7 @@ scsi_target_handler(qact_e action, void *arg) * Instead, we give the data to a user agent. It knows how much * to write based upon tmd->cd_totlen. * - * When the user agent is done, it will clear the cd_xfrlen field and the - * TDFH_DATA_OUT flags and send back status for the command. + * When the user agent is done, it will send back status for the command. */ if (lp->enabled && lp->overcommit) { tmd->cd_next = NULL; @@ -1732,24 +1795,24 @@ scsi_target_handler(qact_e action, void *arg) break; } spin_unlock_irqrestore(&scsi_target_lock, flags); - } else if (xfr->td_hflags & TDFH_DATA_IN) { + } else if (xact->td_hflags & TDFH_DATA_IN) { SDprintk("scsi_target: [%llx] data transmit done\n", tmd->cd_tagval); } - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_xfrlen = 0; + xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; /* * Did we send status already? 
*/ - if (xfr->td_hflags & TDFH_STSVALID) { - if ((xfr->td_lflags & TDFL_SENTSTATUS) == 0) { + if (xact->td_hflags & TDFH_STSVALID) { + if ((xact->td_lflags & TDFL_SENTSTATUS) == 0) { if (tmd->cd_flags & CDF_PRIVATE_2) { printk(KERN_ERR "[%llx] already tried to send status\n", tmd->cd_tagval); } else { tmd->cd_flags |= CDF_PRIVATE_2; SDprintk("[%llx] sending status\n", tmd->cd_tagval); - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + (*bp->h.r_action)(QIN_TMD_CONT, xact); break; } } @@ -1758,8 +1821,8 @@ scsi_target_handler(qact_e action, void *arg) /* * Did we send sense? If so, remove one sense structure. */ - if (xfr->td_hflags & TDFH_SNSVALID) { - if (xfr->td_lflags & TDFL_SENTSENSE) { + if (xact->td_hflags & TDFH_SNSVALID) { + if (xact->td_lflags & TDFL_SENTSENSE) { spin_lock_irqsave(&scsi_target_lock, flags); nptr = ini_from_tmd(bp, tmd); spin_unlock_irqrestore(&scsi_target_lock, flags); @@ -1769,7 +1832,7 @@ scsi_target_handler(qact_e action, void *arg) } } - if (scsi_target_ldfree(bp, xfr, 1)) { + if (scsi_target_ldfree(bp, xact, 1)) { SDprintk("%s: TMD_FIN[%llx]\n", __FUNCTION__, tmd->cd_tagval); (*bp->h.r_action)(QIN_TMD_FIN, tmd); } @@ -1906,7 +1969,7 @@ scsi_target_thread(void *arg) if (bp == NULL) { printk(KERN_WARNING "lost bus when tring to call TMD_FIN\n"); } else { - if (scsi_target_ldfree(bp, &tmd->cd_xfr, 0)) { + if (scsi_target_ldfree(bp, &tmd->cd_xact, 0)) { SDprintk("%s: TMD_FIN[%llx]\n", __FUNCTION__, tmd->cd_tagval); (*bp->h.r_action)(QIN_TMD_FIN, tmd); } @@ -2123,10 +2186,10 @@ scsi_target_start_user_io(sc_io_t *sc) printk(KERN_ERR "scsi_target: failed to copy data to user space\n"); memcpy(tmd->cd_sense, ifailure, TMD_SENSELEN); tmd->cd_scsi_status = CHECK_CONDITION; - tmd->cd_xfr.td_hflags &= ~TDFH_DATA_MASK; - tmd->cd_xfr.td_hflags |= TDFH_SNSVALID|TDFH_STSVALID; - tmd->cd_xfr.td_xfrlen = 0; - (*bp->h.r_action)(QIN_TMD_CONT, &tmd->cd_xfr); + tmd->cd_xact.td_hflags &= ~TDFH_DATA_MASK; + tmd->cd_xact.td_hflags |= TDFH_SNSVALID|TDFH_STSVALID; + 
tmd->cd_xact.td_xfrlen = 0; + (*bp->h.r_action)(QIN_TMD_CONT, &tmd->cd_xact); return (r); } sc->read = 0; @@ -2148,7 +2211,7 @@ scsi_target_end_user_io(sc_io_t *sc) bus_t *bp; lun_t *lp; tmd_cmd_t *tmd; - tmd_xfr_t *xfr; + tmd_xact_t *xact; bp = bus_from_name(sc->hba_name_unit); if (bp == NULL) { @@ -2162,7 +2225,7 @@ scsi_target_end_user_io(sc_io_t *sc) } lp = &bp->luns[sc->lun]; tmd = sc->tag; - xfr = &tmd->cd_xfr; + xact = &tmd->cd_xact; SDprintk2("scsi_target: USER->KERN [%llx] %p err %d len %u\n", tmd->cd_tagval, tmd, sc->err, sc->len); /* * If we had an error, stop right here and return something to the initiator. @@ -2172,10 +2235,10 @@ scsi_target_end_user_io(sc_io_t *sc) memcpy(tmd->cd_sense, mediaerr, TMD_SENSELEN); barf: tmd->cd_scsi_status = CHECK_CONDITION; - xfr->td_hflags &= ~TDFH_DATA_MASK; - xfr->td_hflags |= TDFH_SNSVALID|TDFH_STSVALID; - xfr->td_xfrlen = 0; - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + xact->td_hflags &= ~TDFH_DATA_MASK; + xact->td_hflags |= TDFH_SNSVALID|TDFH_STSVALID; + xact->td_xfrlen = 0; + (*bp->h.r_action)(QIN_TMD_CONT, xact); return (0); } @@ -2196,14 +2259,14 @@ scsi_target_end_user_io(sc_io_t *sc) memcpy(tmd->cd_sense, ifailure, TMD_SENSELEN); goto barf; } - xfr->td_xfrlen = sc->len; - xfr->td_hflags |= TDFH_DATA_IN; + xact->td_xfrlen = sc->len; + xact->td_hflags |= TDFH_DATA_IN; } else { - xfr->td_xfrlen = 0; - xfr->td_hflags &= ~TDFH_DATA_MASK; + xact->td_xfrlen = 0; + xact->td_hflags &= ~TDFH_DATA_MASK; } - xfr->td_hflags |= TDFH_STSVALID; - (*bp->h.r_action)(QIN_TMD_CONT, xfr); + xact->td_hflags |= TDFH_STSVALID; + (*bp->h.r_action)(QIN_TMD_CONT, xact); return (0); } @@ -2211,10 +2274,11 @@ static int scsi_target_endis(char *hba_name_unit, uint64_t nbytes, int lun, int en) { DECLARE_MUTEX_LOCKED(rsem); + unsigned long flags; enadis_t ec; lun_t *lp; bus_t *bp; - int rv; + int rv, i; /* * XXX: yes, there is a race condition here where the bus can @@ -2261,6 +2325,19 @@ scsi_target_endis(char *hba_name_unit, uint64_t 
nbytes, int lun, int en) scsi_free_disk(bp, lun); return (ec.en_error); } + + spin_lock_irqsave(&scsi_target_lock, flags); + for (i = 0; i < HASH_WIDTH; i++) { + ini_t *ini = bp->list[i]; + while (ini) { + spin_unlock_irqrestore(&scsi_target_lock, flags); + add_sdata(ini, invchg); + spin_lock_irqsave(&scsi_target_lock, flags); + ini = ini->ini_next; + } + } + spin_unlock_irqrestore(&scsi_target_lock, flags); + if (en == 0) { scsi_free_disk(bp, lun); } else {