diff --git a/scripts/generate-kernel-patch b/scripts/generate-kernel-patch index 79ffeff3a..431afb15c 100755 --- a/scripts/generate-kernel-patch +++ b/scripts/generate-kernel-patch @@ -211,12 +211,21 @@ fi # Strip patch level from the kernel version number. kver="$(kernel_version "$1")" +# Include fcst in the patch for kernel versions 2.6.33 and later. +if [ "${kver}" ">" "2.6.32" ]; then + include_fcst="true" +else + include_fcst="false" +fi + # Make sure that for kernel 2.6.33 and later the line # "#define CONFIG_SCST_PROC" is removed from scst/include/scst.h. if grep -qw scst_sysfs scst/kernel/in-tree/Makefile.scst-${kver} \ || [ "${generating_upstream_patch}" = "true" ]; then specialize_patch_options="${specialize_patch_options} -v config_scst_proc_undefined=1" +else + include_proc_impl="true" fi if [ "${debug_specialize}" = "true" ]; then specialize_patch_options="${specialize_patch_options} -v debug=1" @@ -288,7 +297,7 @@ scst_13_vdisk="scst/src/dev_handlers/scst_vdisk.c" scst_14_tg="scst/src/scst_tg.c" separate_patches="scst_03_public_headers scst_04_main scst_05_targ scst_06_lib scst_07_pres scst_08_sysfs scst_09_debug scst_10_sgv scst_user scst_13_vdisk scst_14_tg" -if [ "${generating_upstream_patch}" = "false" ]; then +if [ "$include_proc_impl" = "true" ]; then separate_patches+=" scst_proc" fi source_files_in_separate_patch="" @@ -319,7 +328,7 @@ mkdir -p "${tmpdir}" tmp_Kconfig="${tmpdir}/Kconfig.scst-${kver}" cat "scst/kernel/in-tree/Kconfig.scst" | \ -if [ -e "${fcst_patch_series}" ]; then +if [ "${include_fcst}" = "true" ]; then cat else grep -v '^source "drivers/scst/fcst/Kconfig"$' @@ -328,12 +337,12 @@ add_file "${tmp_Kconfig}" "drivers/scst/Kconfig" tmp_Makefile="${tmpdir}/Makefile.scst-${kver}" cat "scst/kernel/in-tree/Makefile.scst-${kver}" | \ -if [ "${generating_upstream_patch}" = "true" ]; then +if [ "$include_proc_impl" != "true" ]; then grep -v 'scst_proc' else cat fi | \ -if [ -e "${fcst_patch_series}" ]; then +if [ "${include_fcst}" = "true" -a "${kver}" != "2.6.37" -a "${kver}" != "2.6.38" ]; then cat else sed -e 's: fcst/* : :' diff --git a/scripts/generate-kernel-with-srp-patches b/scripts/generate-kernel-with-srp-patches deleted file mode 100755 index 7620e115c..000000000 --- a/scripts/generate-kernel-with-srp-patches +++ /dev/null @@ -1,2438 +0,0 @@ -#!/bin/bash - -# Generates a 2.6.34.*, 2.6.35.* or 2.6.36* kernel tree with the SRP_CRED_REQ -# and several other patches applied to the ib_srp initiator.
- -# Adjust this variable such that it points to the directory with -# linux-x.y.z.tar.bz2 and patch-x.y.z.p.bz2 files on your system -TARBALLDIR=/home/bart/software/downloads - -# Local functions - -usage() { - echo "$0 <kernel_version>" -} - -# Source: http://git.kernel.org/?p=linux/kernel/git/roland/infiniband.git;a=patch;h=$commit -get_2_6_36_patch() { -case "$1" in - 7a7008110b94dfaa90db4b0cc5b0c3f964c80506) - cat <<EOF buf; - - if (0) { -- int i; -- - shost_printk(KERN_ERR, target->scsi_host, - PFX "recv completion, opcode 0x%02x\n", opcode); -- -- for (i = 0; i < wc->byte_len; ++i) { -- if (i % 8 == 0) -- printk(KERN_ERR " [%02x] ", i); -- printk(" %02x", ((u8 *) iu->buf)[i]); -- if ((i + 1) % 8 == 0) -- printk("\n"); -- } -- -- if (wc->byte_len % 8) -- printk("\n"); -+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, -+ iu->buf, wc->byte_len, true); - } - - switch (opcode) { --- -1.7.3 - -EOF - ;; - c996bb47bb419b7c2f75499e11750142775e5da9) - cat <<EOF scsi_host->host_lock, flags); -+ -+ next = target->rx_head & (SRP_RQ_SIZE - 1); -+ wr.wr_id = next; -+ iu = target->rx_ring[next]; -+ -+ list.addr = iu->dma; -+ list.length = iu->size; -+ list.lkey = target->srp_host->srp_dev->mr->lkey; -+ -+ wr.next = NULL; -+ wr.sg_list = &list; -+ wr.num_sge = 1; -+ -+ ret = ib_post_recv(target->qp, &wr, &bad_wr); -+ if (!ret) -+ ++target->rx_head; -+ -+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags); -+ -+ return ret; -+} -+ - static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) - { - struct srp_request *req; -@@ -868,6 +900,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - { - struct ib_device *dev; - struct srp_iu *iu; -+ int res; - u8 opcode; - - iu = target->rx_ring[wc->wr_id]; -@@ -904,6 +937,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - - ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, - DMA_FROM_DEVICE); -+ -+ res = srp_post_recv(target); -+ if (res != 0) -+ shost_printk(KERN_ERR, target->scsi_host, -+ PFX "Recv failed with error code %d\n", res); - } - - static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) -@@ -943,45 +981,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) - } - } - --static int __srp_post_recv(struct srp_target_port *target) --{ -- struct srp_iu *iu; -- struct ib_sge list; -- struct ib_recv_wr wr, *bad_wr; -- unsigned int next; -- int ret; -- -- next = target->rx_head & (SRP_RQ_SIZE - 1); -- wr.wr_id = next; -- iu = target->rx_ring[next]; -- -- list.addr = iu->dma; -- list.length = iu->size; -- list.lkey = target->srp_host->srp_dev->mr->lkey; -- -- wr.next = NULL; -- wr.sg_list = &list; -- wr.num_sge = 1; -- -- ret = ib_post_recv(target->qp, &wr, &bad_wr); -- if (!ret) -- ++target->rx_head; -- -- return ret; --} -- --static int srp_post_recv(struct srp_target_port *target) --{ -- unsigned long flags; -- int ret; -- -- spin_lock_irqsave(target->scsi_host->host_lock, flags); -- ret = __srp_post_recv(target); -- spin_unlock_irqrestore(target->scsi_host->host_lock, flags); -- -- return ret; --} -- - /* - * Must be called with target->scsi_host->host_lock held to protect - * req_lim and tx_head.
Lock cannot be dropped between call here and -@@ -1091,11 +1090,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - goto err; - } - -- if (__srp_post_recv(target)) { -- shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n"); -- goto err_unmap; -- } -- - ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, - DMA_TO_DEVICE); - -@@ -1238,6 +1232,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) - int attr_mask = 0; - int comp = 0; - int opcode = 0; -+ int i; - - switch (event->event) { - case IB_CM_REQ_ERROR: -@@ -1287,7 +1282,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) - if (target->status) - break; - -- target->status = srp_post_recv(target); -+ for (i = 0; i < SRP_RQ_SIZE; i++) { -+ target->status = srp_post_recv(target); -+ if (target->status) -+ break; -+ } - if (target->status) - break; - --- -1.7.3 - -EOF - ;; - 89de74866b846cc48780fda3de7fd223296aaca9) - cat <<EOF orig_dgid); - } - -+static ssize_t show_req_lim(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct srp_target_port *target = host_to_target(class_to_shost(dev)); -+ -+ if (target->state == SRP_TARGET_DEAD || -+ target->state == SRP_TARGET_REMOVED) -+ return -ENODEV; -+ -+ return sprintf(buf, "%d\n", target->req_lim); -+} -+ - static ssize_t show_zero_req_lim(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -1586,6 +1598,7 @@ static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); - static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); - static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); - static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); -+static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); - static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); - static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); - static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); -@@ -1597,6 +1610,7 @@ static struct device_attribute *srp_host_attrs[] = { - &dev_attr_pkey, - &dev_attr_dgid, - &dev_attr_orig_dgid, -+ &dev_attr_req_lim, - &dev_attr_zero_req_lim, - &dev_attr_local_ib_port, - &dev_attr_local_ib_device, --- -1.7.3 - -EOF - ;; - *) - echo ERROR - ;; - esac -} - -# Source: https://patchwork.kernel.org/patch/$p/raw/ -get_2_6_37_patch() { -case "$1" in - 143381) - cat <<EOF srp_host, target->rx_ring[i]); -- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) -+ for (i = 0; i < SRP_SQ_SIZE; ++i) - srp_free_iu(target->srp_host, target->tx_ring[i]); - } - -@@ -822,7 +822,7 @@ static int srp_post_recv(struct srp_target_port *target) - - spin_lock_irqsave(target->scsi_host->host_lock, flags); - -- next = target->rx_head & (SRP_RQ_SIZE - 1); -+ next = target->rx_head & SRP_RQ_MASK; - wr.wr_id = next; - iu = target->rx_ring[next]; - -@@ -989,19 +989,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) - static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, - enum srp_request_type req_type) - { -- s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2; -+ s32 rsv = (req_type == SRP_REQ_TASK_MGMT) ?
0 : SRP_TSK_MGMT_SQ_SIZE; - - srp_send_completion(target->send_cq, target); - - if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) - return NULL; - -- if (target->req_lim < min) { -+ if (target->req_lim <= rsv) { - ++target->zero_req_lim; - return NULL; - } - -- return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; -+ return target->tx_ring[target->tx_head & SRP_SQ_MASK]; - } - - /* -@@ -1020,7 +1020,7 @@ static int __srp_post_send(struct srp_target_port *target, - list.lkey = target->srp_host->srp_dev->mr->lkey; - - wr.next = NULL; -- wr.wr_id = target->tx_head & SRP_SQ_SIZE; -+ wr.wr_id = target->tx_head & SRP_SQ_MASK; - wr.sg_list = &list; - wr.num_sge = 1; - wr.opcode = IB_WR_SEND; -@@ -1121,7 +1121,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) - goto err; - } - -- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { -+ for (i = 0; i < SRP_SQ_SIZE; ++i) { - target->tx_ring[i] = srp_alloc_iu(target->srp_host, - srp_max_iu_len, - GFP_KERNEL, DMA_TO_DEVICE); -@@ -1137,7 +1137,7 @@ err: - target->rx_ring[i] = NULL; - } - -- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { -+ for (i = 0; i < SRP_SQ_SIZE; ++i) { - srp_free_iu(target->srp_host, target->tx_ring[i]); - target->tx_ring[i] = NULL; - } -@@ -1626,9 +1626,9 @@ static struct scsi_host_template srp_template = { - .eh_abort_handler = srp_abort, - .eh_device_reset_handler = srp_reset_device, - .eh_host_reset_handler = srp_reset_host, -- .can_queue = SRP_SQ_SIZE, -+ .can_queue = SRP_CMD_SQ_SIZE, - .this_id = -1, -- .cmd_per_lun = SRP_SQ_SIZE, -+ .cmd_per_lun = SRP_CMD_SQ_SIZE, - .use_clustering = ENABLE_CLUSTERING, - .shost_attrs = srp_host_attrs - }; -@@ -1813,7 +1813,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) - printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); - goto out; - } -- target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE); -+ target->scsi_host->cmd_per_lun -+ = min(token, SRP_CMD_SQ_SIZE); - break; - - case SRP_OPT_IO_CLASS: -@@ -1891,7 +1892,7 @@ static ssize_t srp_create_target(struct device *dev, - - INIT_LIST_HEAD(&target->free_reqs); - INIT_LIST_HEAD(&target->req_queue); -- for (i = 0; i < SRP_SQ_SIZE; ++i) { -+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { - target->req_ring[i].index = i; - list_add_tail(&target->req_ring[i].list, &target->free_reqs); - } -@@ -2159,6 +2160,9 @@ static int __init srp_init_module(void) - { - int ret; - -+ BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE); -+ BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE); -+ - if (srp_sg_tablesize > 255) { - printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); - srp_sg_tablesize = 255; -diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h -index 5a80eac..7a959d5 100644 ---- a/drivers/infiniband/ulp/srp/ib_srp.h -+++ b/drivers/infiniband/ulp/srp/ib_srp.h -@@ -59,7 +59,14 @@ enum { - - SRP_RQ_SHIFT = 6, - SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, -- SRP_SQ_SIZE = SRP_RQ_SIZE - 1, -+ SRP_RQ_MASK = SRP_RQ_SIZE - 1, -+ -+ SRP_SQ_SIZE = SRP_RQ_SIZE, -+ SRP_SQ_MASK = SRP_SQ_SIZE - 1, -+ SRP_RSP_SQ_SIZE = 1, -+ SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, -+ SRP_TSK_MGMT_SQ_SIZE = 1, -+ SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, - - SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), - -@@ -144,11 +151,11 @@ struct srp_target_port { - - unsigned tx_head; - unsigned tx_tail; -- struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; -+ struct srp_iu *tx_ring[SRP_SQ_SIZE]; - - struct list_head free_reqs; - struct list_head req_queue; -- struct srp_request req_ring[SRP_SQ_SIZE]; -+ struct 
srp_request req_ring[SRP_CMD_SQ_SIZE]; - - struct work_struct work; - -EOF - ;; - 143391) - cat <<EOF scsi_host->host_lock, flags); - } - -+/* -+ * Must be called with target->scsi_host->host_lock locked to protect -+ * target->req_lim. -+ */ -+static void __srp_handle_cred_req(struct srp_target_port *target, -+ void *req_ptr, void *rsp_ptr) -+{ -+ struct srp_cred_req *req = req_ptr; -+ struct srp_cred_rsp *rsp = rsp_ptr; -+ -+ target->req_lim += be32_to_cpu(req->req_lim_delta); -+ -+ memset(rsp, 0, sizeof *rsp); -+ rsp->opcode = SRP_CRED_RSP; -+ rsp->tag = req->tag; -+} -+ -+/* -+ * Must be called with target->scsi_host->host_lock locked to protect -+ * target->req_lim. -+ */ -+static void __srp_handle_aer_req(struct srp_target_port *target, -+ void *req_ptr, void *rsp_ptr) -+{ -+ struct srp_aer_req *req = req_ptr; -+ struct srp_aer_rsp *rsp = rsp_ptr; -+ -+ target->req_lim += be32_to_cpu(req->req_lim_delta); -+ -+ shost_printk(KERN_ERR, target->scsi_host, -+ PFX "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); -+ -+ memset(rsp, 0, sizeof *rsp); -+ rsp->opcode = SRP_AER_RSP; -+ rsp->tag = req->tag; -+} -+ -+static void srp_handle_req(struct srp_target_port *target, -+ struct srp_iu *req_iu, -+ void (*req_fn)(struct srp_target_port *, -+ void *, void *)) -+{ -+ struct ib_device *dev; -+ u8 *req_buf; -+ unsigned long flags; -+ struct srp_iu *rsp_iu; -+ u8 *rsp_buf; -+ int res; -+ -+ dev = target->srp_host->srp_dev->dev; -+ req_buf = req_iu->buf; -+ -+ spin_lock_irqsave(target->scsi_host->host_lock, flags); -+ -+ rsp_iu = __srp_get_tx_iu(target, SRP_IU_RSP); -+ if (!rsp_iu) -+ goto out_unlock; -+ -+ rsp_buf = rsp_iu->buf; -+ -+ (*req_fn)(target, req_buf, rsp_buf); -+ -+ ib_dma_sync_single_for_device(dev, rsp_iu->dma, srp_max_iu_len, -+ DMA_TO_DEVICE); -+ -+ res = __srp_post_send(target, rsp_iu, sizeof *rsp_iu, SRP_SEND_RSP); -+ if (res) -+ shost_printk(KERN_ERR, target->scsi_host, -+ PFX "Sending response failed -- res = %d\n", res); -+ -+out_unlock: -+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags); -+} -+ - static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - { - struct ib_device *dev; -@@ -929,6 +1008,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - PFX "Got target logout request\n"); - break; - -+ case SRP_CRED_REQ: -+ srp_handle_req(target, iu, __srp_handle_cred_req); -+ break; -+ -+ case SRP_AER_REQ: -+ srp_handle_req(target, iu, __srp_handle_aer_req); -+ break; -+ - default: - shost_printk(KERN_WARNING, target->scsi_host, - PFX "Unhandled SRP opcode 0x%02x\n", opcode); -@@ -985,18 +1072,27 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) - * Must be called with target->scsi_host->host_lock held to protect - * req_lim and tx_head. Lock cannot be dropped between call here and - * call to __srp_post_send(). -+ * -+ * Note: -+ * An upper limit for the number of allocated information units for each -+ * request type is: -+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues -+ * more than Scsi_Host.can_queue requests. -+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. -+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than -+ * one unanswered SRP request to an initiator. - */ - static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, -- enum srp_request_type req_type) -+ enum srp_tx_iu_type iu_type) - { -- s32 rsv = (req_type == SRP_REQ_TASK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; -+ s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ?
0 : SRP_TSK_MGMT_SQ_SIZE; - - srp_send_completion(target->send_cq, target); - - if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) - return NULL; - -- if (target->req_lim <= rsv) { -+ if (iu_type != SRP_IU_RSP && target->req_lim <= rsv) { - ++target->zero_req_lim; - return NULL; - } -@@ -1009,7 +1105,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, - * req_lim and tx_head. - */ - static int __srp_post_send(struct srp_target_port *target, -- struct srp_iu *iu, int len) -+ struct srp_iu *iu, int len, -+ enum srp_send_iu_type iu_type) - { - struct ib_sge list; - struct ib_send_wr wr, *bad_wr; -@@ -1030,7 +1127,8 @@ static int __srp_post_send(struct srp_target_port *target, - - if (!ret) { - ++target->tx_head; -- --target->req_lim; -+ if (iu_type == SRP_SEND_REQ) -+ --target->req_lim; - } - - return ret; -@@ -1056,7 +1154,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - return 0; - } - -- iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL); -+ iu = __srp_get_tx_iu(target, SRP_IU_CMD); - if (!iu) - goto err; - -@@ -1093,7 +1191,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, - DMA_TO_DEVICE); - -- if (__srp_post_send(target, iu, len)) { -+ if (__srp_post_send(target, iu, len, SRP_SEND_REQ)) { - shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); - goto err_unmap; - } -@@ -1363,7 +1461,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, - - init_completion(&req->done); - -- iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT); -+ iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); - if (!iu) - goto out; - -@@ -1376,7 +1474,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, - tsk_mgmt->tsk_mgmt_func = func; - tsk_mgmt->task_tag = req->index; - -- if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) -+ if (__srp_post_send(target, iu, sizeof *tsk_mgmt, SRP_SEND_REQ)) - goto out; - - req->tsk_mgmt = iu; -diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h -index 7a959d5..854ec81 100644 ---- a/drivers/infiniband/ulp/srp/ib_srp.h -+++ b/drivers/infiniband/ulp/srp/ib_srp.h -@@ -82,9 +82,15 @@ enum srp_target_state { - SRP_TARGET_REMOVED - }; - --enum srp_request_type { -- SRP_REQ_NORMAL, -- SRP_REQ_TASK_MGMT, -+enum srp_tx_iu_type { -+ SRP_IU_CMD, -+ SRP_IU_TSK_MGMT, -+ SRP_IU_RSP, -+}; -+ -+enum srp_send_iu_type { -+ SRP_SEND_REQ, -+ SRP_SEND_RSP, - }; - - struct srp_device { -diff --git a/include/scsi/srp.h b/include/scsi/srp.h -index ad178fa..1ae84db 100644 ---- a/include/scsi/srp.h -+++ b/include/scsi/srp.h -@@ -239,4 +239,42 @@ struct srp_rsp { - u8 data[0]; - } __packed; - -+struct srp_cred_req { -+ u8 opcode; -+ u8 sol_not; -+ u8 reserved[2]; -+ __be32 req_lim_delta; -+ u64 tag; -+}; -+ -+struct srp_cred_rsp { -+ u8 opcode; -+ u8 reserved[7]; -+ u64 tag; -+}; -+ -+/* -+ * The SRP spec defines the fixed portion of the AER_REQ structure to be -+ * 36 bytes, so it needs to be packed to avoid having it padded to 40 bytes -+ * on 64-bit architectures. -+ */ -+struct srp_aer_req { -+ u8 opcode; -+ u8 sol_not; -+ u8 reserved[2]; -+ __be32 req_lim_delta; -+ u64 tag; -+ u32 reserved2; -+ __be64 lun; -+ __be32 sense_data_len; -+ u32 reserved3; -+ u8 sense_data[0]; -+} __packed; -+ -+struct srp_aer_rsp { -+ u8 opcode; -+ u8 reserved[7]; -+ u64 tag; -+}; -+ - #endif /* SCSI_SRP_H */ -EOF - ;; - 143401) - cat <<EOF scsi_host->host_lock held to protect -+ * req_lim and tx_head.
Lock cannot be dropped between call here and -+ * call to __srp_post_send(). -+ * -+ * Note: -+ * An upper limit for the number of allocated information units for each -+ * request type is: -+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues -+ * more than Scsi_Host.can_queue requests. -+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. -+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than -+ * one unanswered SRP request to an initiator. -+ */ -+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, -+ enum srp_tx_iu_type iu_type) -+{ -+ s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; -+ -+ srp_send_completion(target->send_cq, target); -+ -+ if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) -+ return NULL; -+ -+ if (iu_type != SRP_IU_RSP && target->req_lim <= rsv) { -+ ++target->zero_req_lim; -+ return NULL; -+ } -+ -+ return target->tx_ring[target->tx_head & SRP_SQ_MASK]; -+} -+ -+/* -+ * Must be called with target->scsi_host->host_lock held to protect -+ * req_lim and tx_head. -+ */ -+static int __srp_post_send(struct srp_target_port *target, -+ struct srp_iu *iu, int len, -+ enum srp_send_iu_type iu_type) -+{ -+ struct ib_sge list; -+ struct ib_send_wr wr, *bad_wr; -+ int ret = 0; -+ -+ list.addr = iu->dma; -+ list.length = len; -+ list.lkey = target->srp_host->srp_dev->mr->lkey; -+ -+ wr.next = NULL; -+ wr.wr_id = target->tx_head & SRP_SQ_MASK; -+ wr.sg_list = &list; -+ wr.num_sge = 1; -+ wr.opcode = IB_WR_SEND; -+ wr.send_flags = IB_SEND_SIGNALED; -+ -+ ret = ib_post_send(target->qp, &wr, &bad_wr); -+ -+ if (!ret) { -+ ++target->tx_head; -+ if (iu_type == SRP_SEND_REQ) -+ --target->req_lim; -+ } -+ -+ return ret; -+} -+ -+/* - * Must be called with target->scsi_host->host_lock locked to protect - * target->req_lim. - */ -@@ -1068,72 +1129,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) - } - } - --/* -- * Must be called with target->scsi_host->host_lock held to protect -- * req_lim and tx_head. Lock cannot be dropped between call here and -- * call to __srp_post_send(). -- * -- * Note: -- * An upper limit for the number of allocated information units for each -- * request type is: -- * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues -- * more than Scsi_Host.can_queue requests. -- * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. -- * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than -- * one unanswered SRP request to an initiator. -- */ --static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, -- enum srp_tx_iu_type iu_type) --{ -- s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; -- -- srp_send_completion(target->send_cq, target); -- -- if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) -- return NULL; -- -- if (iu_type != SRP_IU_RSP && target->req_lim <= rsv) { -- ++target->zero_req_lim; -- return NULL; -- } -- -- return target->tx_ring[target->tx_head & SRP_SQ_MASK]; --} -- --/* -- * Must be called with target->scsi_host->host_lock held to protect -- * req_lim and tx_head. 
-- */ --static int __srp_post_send(struct srp_target_port *target, -- struct srp_iu *iu, int len, -- enum srp_send_iu_type iu_type) --{ -- struct ib_sge list; -- struct ib_send_wr wr, *bad_wr; -- int ret = 0; -- -- list.addr = iu->dma; -- list.length = len; -- list.lkey = target->srp_host->srp_dev->mr->lkey; -- -- wr.next = NULL; -- wr.wr_id = target->tx_head & SRP_SQ_MASK; -- wr.sg_list = &list; -- wr.num_sge = 1; -- wr.opcode = IB_WR_SEND; -- wr.send_flags = IB_SEND_SIGNALED; -- -- ret = ib_post_send(target->qp, &wr, &bad_wr); -- -- if (!ret) { -- ++target->tx_head; -- if (iu_type == SRP_SEND_REQ) -- --target->req_lim; -- } -- -- return ret; --} -- - static int srp_queuecommand(struct scsi_cmnd *scmnd, - void (*done)(struct scsi_cmnd *)) - { -EOF - ;; - 143411) - cat <<EOF max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); - target->req_lim = be32_to_cpu(rsp->req_lim_delta); - -- target->scsi_host->can_queue = min(target->req_lim, -- target->scsi_host->can_queue); -+ /* -+ * Set can_queue such that we don't needlessly -+ * bounce requests back to the SCSI mid-layer. -+ */ -+ target->scsi_host->can_queue -+ = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, -+ target->scsi_host->can_queue); - } else { - shost_printk(KERN_WARNING, target->scsi_host, - PFX "Unhandled RSP opcode %#x\n", opcode); -EOF - ;; - 143421) - cat <<EOF dma, srp_max_iu_len, - DMA_TO_DEVICE); - -- req = list_entry(target->free_reqs.next, struct srp_request, list); -+ req = list_first_entry(&target->free_reqs, struct srp_request, list); - - scmnd->scsi_done = done; - scmnd->result = 0; -EOF - ;; - *) - echo ERROR - ;; - esac -} - -get_locking_per_lun_patch() { -cat <<EOF done); - } - -+static bool srp_port_change_state(struct srp_target_port *target, -+ enum srp_target_state old, -+ enum srp_target_state new) -+{ -+ unsigned long flags; -+ bool ret; -+ -+ spin_lock_irqsave(&target->lock, flags); -+ if (target->state == old) { -+ target->state = new; -+ ret = true; -+ } else -+ ret = false; -+ spin_unlock_irqrestore(&target->lock, flags); -+ return ret; -+} -+ - static void srp_remove_work(struct work_struct *work) - { - struct srp_target_port *target = - container_of(work, struct srp_target_port, work); - -- spin_lock_irq(target->scsi_host->host_lock); -- if (target->state != SRP_TARGET_DEAD) { -- spin_unlock_irq(target->scsi_host->host_lock); -+ if (!srp_port_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED)) - return; -- } -- target->state = SRP_TARGET_REMOVED; -- spin_unlock_irq(target->scsi_host->host_lock); - - spin_lock(&target->srp_host->target_lock); - list_del(&target->list); -@@ -541,8 +554,13 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, - - static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) - { -+ unsigned long flags; -+ - srp_unmap_data(req->scmnd, target, req); -- list_move_tail(&req->list, &target->free_reqs); -+ req->scmnd = NULL; -+ spin_lock_irqsave(&target->lock, flags); -+ list_add_tail(&req->list, &target->free_reqs); -+ spin_unlock_irqrestore(&target->lock, flags); - } - - static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) -@@ -555,17 +573,14 @@ static int srp_reconnect_target(struct srp_target_port *target) - { - struct ib_qp_attr qp_attr; -- struct srp_request *req, *tmp; - struct ib_wc wc; - int ret; -+ int i; - -- spin_lock_irq(target->scsi_host->host_lock); -- if (target->state != SRP_TARGET_LIVE) { -- spin_unlock_irq(target->scsi_host->host_lock); -+
if (!srp_port_change_state(target, SRP_TARGET_LIVE, -+ SRP_TARGET_CONNECTING)) { - return -EAGAIN; - } -- target->state = SRP_TARGET_CONNECTING; -- spin_unlock_irq(target->scsi_host->host_lock); - - srp_disconnect_target(target); - /* -@@ -590,27 +605,20 @@ static int srp_reconnect_target(struct srp_target_port *target) - while (ib_poll_cq(target->send_cq, 1, &wc) > 0) - ; /* nothing */ - -- spin_lock_irq(target->scsi_host->host_lock); -- list_for_each_entry_safe(req, tmp, &target->req_queue, list) -- srp_reset_req(target, req); -- spin_unlock_irq(target->scsi_host->host_lock); -- -- target->rx_head = 0; -- target->tx_head = 0; -- target->tx_tail = 0; -+ for (i = 0; i < ARRAY_SIZE(target->req_ring); ++i) -+ if (target->req_ring[i].scmnd) -+ srp_reset_req(target, &target->req_ring[i]); - - target->qp_in_error = 0; - ret = srp_connect_target(target); - if (ret) - goto err; - -- spin_lock_irq(target->scsi_host->host_lock); -- if (target->state == SRP_TARGET_CONNECTING) { -+ if (srp_port_change_state(target, SRP_TARGET_CONNECTING, -+ SRP_TARGET_LIVE)) - ret = 0; -- target->state = SRP_TARGET_LIVE; -- } else -+ else - ret = -EAGAIN; -- spin_unlock_irq(target->scsi_host->host_lock); - - return ret; - -@@ -624,13 +632,11 @@ err: - * be in the context of the SCSI error handler now, which - * would deadlock if we call scsi_remove_host(). - */ -- spin_lock_irq(target->scsi_host->host_lock); -- if (target->state == SRP_TARGET_CONNECTING) { -- target->state = SRP_TARGET_DEAD; -+ if (srp_port_change_state(target, SRP_TARGET_CONNECTING, -+ SRP_TARGET_DEAD)) { - INIT_WORK(&target->work, srp_remove_work); - schedule_work(&target->work); - } -- spin_unlock_irq(target->scsi_host->host_lock); - - return ret; - } -@@ -811,20 +817,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, - return len; - } - --static int srp_post_recv(struct srp_target_port *target) -+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) - { -- unsigned long flags; -- struct srp_iu *iu; - struct ib_sge list; - struct ib_recv_wr wr, *bad_wr; -- unsigned int next; -- int ret; -- -- spin_lock_irqsave(target->scsi_host->host_lock, flags); - -- next = target->rx_head & SRP_RQ_MASK; -- wr.wr_id = next; -- iu = target->rx_ring[next]; -+ wr.wr_id = iu->index; - - list.addr = iu->dma; - list.length = iu->size; -@@ -834,13 +832,7 @@ static int srp_post_recv(struct srp_target_port *target) - wr.sg_list = &list; - wr.num_sge = 1; - -- ret = ib_post_recv(target->qp, &wr, &bad_wr); -- if (!ret) -- ++target->rx_head; -- -- spin_unlock_irqrestore(target->scsi_host->host_lock, flags); -- -- return ret; -+ return ib_post_recv(target->qp, &wr, &bad_wr); - } - - static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) -@@ -852,9 +844,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) - - delta = (s32) be32_to_cpu(rsp->req_lim_delta); - -- spin_lock_irqsave(target->scsi_host->host_lock, flags); -- -+ spin_lock_irqsave(&target->lock, flags); - target->req_lim += delta; -+ spin_unlock_irqrestore(&target->lock, flags); - - req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT]; - -@@ -892,14 +884,12 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) - } else - req->cmd_done = 1; - } -- -- spin_unlock_irqrestore(target->scsi_host->host_lock, flags); - } - - /* -- * Must be called with target->scsi_host->host_lock held to protect -- * req_lim and tx_head. 
Lock cannot be dropped between call here and -- * call to __srp_post_send(). -+ * It is the responsibility of the caller to put back the returned information -+ * unit in the target->tx_free list and to increment target->req_lim if an -+ * error occurs before __srp_post_send() is called. - * - * Note: - * An upper limit for the number of allocated information units for each -@@ -914,24 +904,29 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, - enum srp_tx_iu_type iu_type) - { - s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; -+ struct srp_iu *res = NULL; -+ unsigned long flags; - -- srp_send_completion(target->send_cq, target); -+ spin_lock_irqsave(&target->lock, flags); - -- if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) -- return NULL; -+ __srp_send_completion(target->send_cq, target); -+ -+ if (list_empty(&target->tx_free)) -+ goto out; - - if (iu_type != SRP_IU_RSP && target->req_lim <= rsv) { - ++target->zero_req_lim; -- return NULL; -+ goto out; - } - -- return target->tx_ring[target->tx_head & SRP_SQ_MASK]; -+ --target->req_lim; -+ res = list_first_entry(&target->tx_free, struct srp_iu, list); -+ list_del(&res->list); -+out: -+ spin_unlock_irqrestore(&target->lock, flags); -+ return res; - } - --/* -- * Must be called with target->scsi_host->host_lock held to protect -- * req_lim and tx_head. -- */ - static int __srp_post_send(struct srp_target_port *target, - struct srp_iu *iu, int len, - enum srp_send_iu_type iu_type) -@@ -945,7 +940,7 @@ static int __srp_post_send(struct srp_target_port *target, - list.lkey = target->srp_host->srp_dev->mr->lkey; - - wr.next = NULL; -- wr.wr_id = target->tx_head & SRP_SQ_MASK; -+ wr.wr_id = iu->index; - wr.sg_list = &list; - wr.num_sge = 1; - wr.opcode = IB_WR_SEND; -@@ -953,43 +948,45 @@ static int __srp_post_send(struct srp_target_port *target, - - ret = ib_post_send(target->qp, &wr, &bad_wr); - -- if (!ret) { -- ++target->tx_head; -+ if (ret) { -+ unsigned long flags; -+ -+ spin_lock_irqsave(&target->lock, flags); - if (iu_type == SRP_SEND_REQ) -- --target->req_lim; -+ target->req_lim++; -+ list_add(&iu->list, &target->tx_free); -+ spin_unlock_irqrestore(&target->lock, flags); - } - - return ret; - } - --/* -- * Must be called with target->scsi_host->host_lock locked to protect -- * target->req_lim. -- */ --static void __srp_handle_cred_req(struct srp_target_port *target, -- void *req_ptr, void *rsp_ptr) -+static void srp_handle_cred_req(struct srp_target_port *target, -+ void *req_ptr, void *rsp_ptr) - { -+ unsigned long flags; - struct srp_cred_req *req = req_ptr; - struct srp_cred_rsp *rsp = rsp_ptr; - -+ spin_lock_irqsave(&target->lock, flags); - target->req_lim += be32_to_cpu(req->req_lim_delta); -+ spin_unlock_irqrestore(&target->lock, flags); - - memset(rsp, 0, sizeof *rsp); - rsp->opcode = SRP_CRED_RSP; - rsp->tag = req->tag; - } - --/* -- * Must be called with target->scsi_host->host_lock locked to protect -- * target->req_lim.
-- */ --static void __srp_handle_aer_req(struct srp_target_port *target, -- void *req_ptr, void *rsp_ptr) -+static void srp_handle_aer_req(struct srp_target_port *target, -+ void *req_ptr, void *rsp_ptr) - { -+ unsigned long flags; - struct srp_aer_req *req = req_ptr; - struct srp_aer_rsp *rsp = rsp_ptr; - -+ spin_lock_irqsave(&target->lock, flags); - target->req_lim += be32_to_cpu(req->req_lim_delta); -+ spin_unlock_irqrestore(&target->lock, flags); - - shost_printk(KERN_ERR, target->scsi_host, - PFX "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); -@@ -1006,7 +1003,6 @@ static void srp_handle_req(struct srp_target_port *target, - { - struct ib_device *dev; - u8 *req_buf; -- unsigned long flags; - struct srp_iu *rsp_iu; - u8 *rsp_buf; - int res; -@@ -1014,11 +1010,9 @@ static void srp_handle_req(struct srp_target_port *target, - dev = target->srp_host->srp_dev->dev; - req_buf = req_iu->buf; - -- spin_lock_irqsave(target->scsi_host->host_lock, flags); -- - rsp_iu = __srp_get_tx_iu(target, SRP_IU_RSP); - if (!rsp_iu) -- goto out_unlock; -+ return; - - rsp_buf = rsp_iu->buf; - -@@ -1031,9 +1025,6 @@ static void srp_handle_req(struct srp_target_port *target, - if (res) - shost_printk(KERN_ERR, target->scsi_host, - PFX "Sending response failed -- res = %d\n", res); -- --out_unlock: -- spin_unlock_irqrestore(target->scsi_host->host_lock, flags); - } - - static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) -@@ -1070,11 +1061,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - break; - - case SRP_CRED_REQ: -- srp_handle_req(target, iu, __srp_handle_cred_req); -+ srp_handle_req(target, iu, srp_handle_cred_req); - break; - - case SRP_AER_REQ: -- srp_handle_req(target, iu, __srp_handle_aer_req); -+ srp_handle_req(target, iu, srp_handle_aer_req); - break; - - default: -@@ -1086,7 +1077,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, - DMA_FROM_DEVICE); - -- res = srp_post_recv(target); -+ res = srp_post_recv(target, iu); - if (res != 0) - shost_printk(KERN_ERR, target->scsi_host, - PFX "Recv failed with error code %d\n", res); -@@ -1095,38 +1086,59 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) - static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) - { - struct srp_target_port *target = target_ptr; -- struct ib_wc wc; -+ struct ib_wc *const wc = target->recv_wc; -+ int i, n; - - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); -- while (ib_poll_cq(cq, 1, &wc) > 0) { -- if (wc.status) { -- shost_printk(KERN_ERR, target->scsi_host, -- PFX "failed receive status %d\n", -- wc.status); -- target->qp_in_error = 1; -- break; -- } -+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(target->recv_wc), wc)) > 0) { -+ for (i = 0; i < n; ++i) { -+ if (wc[i].status) { -+ shost_printk(KERN_ERR, target->scsi_host, -+ PFX "failed receive status %d\n", -+ wc[i].status); -+ target->qp_in_error = 1; -+ goto out; -+ } - -- srp_handle_recv(target, &wc); -+ srp_handle_recv(target, &wc[i]); -+ } - } -+out: -+ return; - } - --static void srp_send_completion(struct ib_cq *cq, void *target_ptr) -+static void __srp_send_completion(struct ib_cq *cq, void *target_ptr) - { - struct srp_target_port *target = target_ptr; -- struct ib_wc wc; -+ struct ib_wc *const wc = target->send_wc; -+ int i, n; -+ -+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(target->send_wc), wc)) > 0) { -+ for (i = 0; i < n; ++i) { -+ if (wc[i].status) { -+ 
shost_printk(KERN_ERR, target->scsi_host, -+ PFX "failed send status %d\n", -+ wc[i].status); -+ target->qp_in_error = 1; -+ goto out; -+ } - -- while (ib_poll_cq(cq, 1, &wc) > 0) { -- if (wc.status) { -- shost_printk(KERN_ERR, target->scsi_host, -- PFX "failed send status %d\n", -- wc.status); -- target->qp_in_error = 1; -- break; -+ list_add_tail(&target->tx_ring[wc[i].wr_id]->list, -+ &target->tx_free); - } -- -- ++target->tx_tail; - } -+out: -+ return; -+} -+ -+static void srp_send_completion(struct ib_cq *cq, void *target_ptr) -+{ -+ struct srp_target_port *target = target_ptr; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&target->lock, flags); -+ __srp_send_completion(cq, target_ptr); -+ spin_unlock_irqrestore(&target->lock, flags); - } - - static int srp_queuecommand(struct scsi_cmnd *scmnd, -@@ -1138,6 +1150,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - struct srp_cmd *cmd; - struct ib_device *dev; - int len; -+ unsigned long flags; - - if (target->state == SRP_TARGET_CONNECTING) - goto err; -@@ -1157,7 +1170,10 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, - DMA_TO_DEVICE); - -+ spin_lock_irqsave(&target->lock, flags); - req = list_first_entry(&target->free_reqs, struct srp_request, list); -+ list_del(&req->list); -+ spin_unlock_irqrestore(&target->lock, flags); - - scmnd->scsi_done = done; - scmnd->result = 0; -@@ -1180,7 +1196,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - if (len < 0) { - shost_printk(KERN_ERR, target->scsi_host, - PFX "Failed to map data\n"); -- goto err; -+ goto err_putback; - } - - ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, -@@ -1188,16 +1204,22 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, - - if (__srp_post_send(target, iu, len, SRP_SEND_REQ)) { - shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); -+ spin_lock_irqsave(&target->lock, flags); -+ list_del(&req->list); -+ spin_unlock_irqrestore(&target->lock, flags); - goto err_unmap; - } - -- list_move_tail(&req->list, &target->req_queue); -- - return 0; - - err_unmap: - srp_unmap_data(scmnd, target, req); - -+err_putback: -+ spin_lock_irqsave(&target->lock, flags); -+ list_add(&req->list, &target->free_reqs); -+ spin_unlock_irqrestore(&target->lock, flags); -+ - err: - return SCSI_MLQUEUE_HOST_BUSY; - } -@@ -1212,14 +1234,19 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) - GFP_KERNEL, DMA_FROM_DEVICE); - if (!target->rx_ring[i]) - goto err; -+ target->rx_ring[i]->index = i; - } - -+ INIT_LIST_HEAD(&target->tx_free); -+ - for (i = 0; i < SRP_SQ_SIZE; ++i) { - target->tx_ring[i] = srp_alloc_iu(target->srp_host, - srp_max_iu_len, - GFP_KERNEL, DMA_TO_DEVICE); - if (!target->tx_ring[i]) - goto err; -+ target->tx_ring[i]->index = i; -+ list_add_tail(&target->tx_ring[i]->list, &target->tx_free); - } - - return 0; -@@ -1381,7 +1408,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) - break; - - for (i = 0; i < SRP_RQ_SIZE; i++) { -- target->status = srp_post_recv(target); -+ target->status = srp_post_recv(target, -+ target->rx_ring[i]); - if (target->status) - break; - } -@@ -1451,8 +1479,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, - struct srp_iu *iu; - struct srp_tsk_mgmt *tsk_mgmt; - -- spin_lock_irq(target->scsi_host->host_lock); -- - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) { - req->scmnd->result = DID_BAD_TARGET << 16; -@@ -1479,8 +1505,6 @@ static int 
srp_send_tsk_mgmt(struct srp_target_port *target, - - req->tsk_mgmt = iu; - -- spin_unlock_irq(target->scsi_host->host_lock); -- - if (!wait_for_completion_timeout(&req->done, - msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) - return -1; -@@ -1488,7 +1512,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, - return 0; - - out: -- spin_unlock_irq(target->scsi_host->host_lock); - return -1; - } - -@@ -1519,8 +1542,6 @@ static int srp_abort(struct scsi_cmnd *scmnd) - if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) - return FAILED; - -- spin_lock_irq(target->scsi_host->host_lock); -- - if (req->cmd_done) { - srp_remove_req(target, req); - scmnd->scsi_done(scmnd); -@@ -1530,15 +1551,14 @@ static int srp_abort(struct scsi_cmnd *scmnd) - } else - ret = FAILED; - -- spin_unlock_irq(target->scsi_host->host_lock); -- - return ret; - } - - static int srp_reset_device(struct scsi_cmnd *scmnd) - { - struct srp_target_port *target = host_to_target(scmnd->device->host); -- struct srp_request *req, *tmp; -+ struct srp_request *req; -+ int i; - - shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); - -@@ -1551,14 +1571,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) - if (req->tsk_status) - return FAILED; - -- spin_lock_irq(target->scsi_host->host_lock); -- -- list_for_each_entry_safe(req, tmp, &target->req_queue, list) -- if (req->scmnd->device == scmnd->device) -+ for (i = 0; i < ARRAY_SIZE(target->req_ring); ++i) -+ if (req->scmnd && req->scmnd->device == scmnd->device) - srp_reset_req(target, req); - -- spin_unlock_irq(target->scsi_host->host_lock); -- - return SUCCESS; - } - -@@ -1575,6 +1591,26 @@ static int srp_reset_host(struct scsi_cmnd *scmnd) - return ret; - } - -+static ssize_t show_max_host_blocked(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ -+ return sprintf(buf, "%d\n", max(1U, host->max_host_blocked)); -+} -+ -+ -+static ssize_t set_max_host_blocked(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct Scsi_Host *host = class_to_shost(dev); -+ -+ host->max_host_blocked = max(1UL, simple_strtoul(buf, NULL, 0)); -+ return count; -+} -+ - static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, - char *buf) - { -@@ -1690,6 +1726,8 @@ static ssize_t show_local_ib_device(struct device *dev, - return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); - } - -+static DEVICE_ATTR(max_host_blocked, S_IWUSR | S_IRUGO, -+ show_max_host_blocked, set_max_host_blocked); - static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); - static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); - static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); -@@ -1702,6 +1740,7 @@ static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); - static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); - - static struct device_attribute *srp_host_attrs[] = { -+ &dev_attr_max_host_blocked, - &dev_attr_id_ext, - &dev_attr_ioc_guid, - &dev_attr_service_id, -@@ -1981,6 +2020,7 @@ static ssize_t srp_create_target(struct device *dev, - target_host->transportt = ib_srp_transport_template; - target_host->max_lun = SRP_MAX_LUN; - target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; -+ target_host->unlocked_qcmds = true; - - target = host_to_target(target_host); - -@@ -1988,8 +2028,9 @@ static ssize_t srp_create_target(struct device *dev, - target->scsi_host = target_host; - 
target->srp_host = host; - -+ spin_lock_init(&target->lock); -+ - INIT_LIST_HEAD(&target->free_reqs); -- INIT_LIST_HEAD(&target->req_queue); - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { - target->req_ring[i].index = i; - list_add_tail(&target->req_ring[i].list, &target->free_reqs); -@@ -2200,6 +2241,7 @@ static void srp_remove_one(struct ib_device *device) - struct srp_host *host, *tmp_host; - LIST_HEAD(target_list); - struct srp_target_port *target, *tmp_target; -+ unsigned long flags; - - srp_dev = ib_get_client_data(device, &srp_client); - -@@ -2217,9 +2259,9 @@ static void srp_remove_one(struct ib_device *device) - */ - spin_lock(&host->target_lock); - list_for_each_entry(target, &host->target_list, list) { -- spin_lock_irq(target->scsi_host->host_lock); -+ spin_lock_irqsave(&target->lock, flags); - target->state = SRP_TARGET_REMOVED; -- spin_unlock_irq(target->scsi_host->host_lock); -+ spin_unlock_irqrestore(&target->lock, flags); - } - spin_unlock(&host->target_lock); - -diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h -index 854ec81..1d5ab5d 100644 ---- a/drivers/infiniband/ulp/srp/ib_srp.h -+++ b/drivers/infiniband/ulp/srp/ib_srp.h -@@ -146,21 +146,23 @@ struct srp_target_port { - struct ib_cq *recv_cq; - struct ib_cq *send_cq; - struct ib_qp *qp; -+ struct ib_wc send_wc[16]; -+ struct ib_wc recv_wc[16]; - - int max_ti_iu_len; -+ -+ spinlock_t lock; -+ - s32 req_lim; - - int zero_req_lim; - -- unsigned rx_head; - struct srp_iu *rx_ring[SRP_RQ_SIZE]; - -- unsigned tx_head; -- unsigned tx_tail; -+ struct list_head tx_free; - struct srp_iu *tx_ring[SRP_SQ_SIZE]; - - struct list_head free_reqs; -- struct list_head req_queue; - struct srp_request req_ring[SRP_CMD_SQ_SIZE]; - - struct work_struct work; -@@ -173,9 +175,11 @@ struct srp_target_port { - }; - - struct srp_iu { -+ struct list_head list; - u64 dma; - void *buf; - size_t size; -+ short index; - enum dma_data_direction direction; - }; - -diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c -index ad0ed21..2f9110c 100644 ---- a/drivers/scsi/scsi.c -+++ b/drivers/scsi/scsi.c -@@ -737,23 +737,31 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) - goto out; - } - -- spin_lock_irqsave(host->host_lock, flags); -- /* -- * AK: unlikely race here: for some reason the timer could -- * expire before the serial number is set up below. -- * -- * TODO: kill serial or move to blk layer -- */ -- scsi_cmd_get_serial(host, cmd); -+ if (!host->unlocked_qcmds) { -+ spin_lock_irqsave(host->host_lock, flags); -+ /* -+ * AK: unlikely race here: for some reason the timer could -+ * expire before the serial number is set up below. 
-+ * -+ * TODO: kill serial or move to blk layer -+ */ -+ scsi_cmd_get_serial(host, cmd); -+ } else -+ cmd->serial_number = 1; - - if (unlikely(host->shost_state == SHOST_DEL)) { -+ if (host->unlocked_qcmds) -+ spin_lock_irqsave(host->host_lock, flags); - cmd->result = (DID_NO_CONNECT << 16); - scsi_done(cmd); -+ spin_unlock_irqrestore(host->host_lock, flags); - } else { - trace_scsi_dispatch_cmd_start(cmd); - rtn = host->hostt->queuecommand(cmd, scsi_done); -+ if (!host->unlocked_qcmds) -+ spin_unlock_irqrestore(host->host_lock, flags); - } -- spin_unlock_irqrestore(host->host_lock, flags); -+ - if (rtn) { - trace_scsi_dispatch_cmd_error(cmd, rtn); - if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && -diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h -index b7bdecb..1814c51 100644 ---- a/include/scsi/scsi_host.h -+++ b/include/scsi/scsi_host.h -@@ -636,6 +636,9 @@ struct Scsi_Host { - /* Asynchronous scan in progress */ - unsigned async_scan:1; - -+ /* call queuecommand without Scsi_Host lock held */ -+ unsigned unlocked_qcmds:1; -+ - /* - * Optional work queue to be utilized by the transport - */ - -EOF -} - -# Source: http://git.kernel.dk/?p=linux-2.6-block.git;a=patch;h=$commit -get_block_layer_patch() { -case "$1" in - 5a00f237eb167a5e98d5f3bb56e11e4da406a5bc) - cat <<EOF nr_batch_requests == q->nr_batching || -- (ioc->nr_batch_requests > 0 -- && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); --} -- --/* -- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This -- * will cause the process to be a "batcher" on all queues in the system. This -- * is the behaviour we want though - once it gets a wakeup it should be given -- * a nice run. -- */ --static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) --{ -- if (!ioc || ioc_batching(q, ioc)) -- return; -- -- ioc->nr_batch_requests = q->nr_batching; -- ioc->last_waited = jiffies; --} -- - static void __freed_request(struct request_queue *q, int sync) - { - struct request_list *rl = &q->rq; -@@ -749,7 +715,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags, - { - struct request *rq = NULL; - struct request_list *rl = &q->rq; -- struct io_context *ioc = NULL; - const bool is_sync = rw_is_sync(rw_flags) != 0; - int may_queue, priv; - -@@ -757,41 +722,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags, - if (may_queue == ELV_MQUEUE_NO) - goto rq_starved; - -- if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { -- if (rl->count[is_sync]+1 >= q->nr_requests) { -- ioc = current_io_context(GFP_ATOMIC, q->node); -- /* -- * The queue will fill after this allocation, so set -- * it as full, and mark this process as "batching". -- * This process will be allowed to complete a batch of -- * requests, others will be blocked.
-- */ -- if (!blk_queue_full(q, is_sync)) { -- ioc_set_batching(q, ioc); -- blk_set_queue_full(q, is_sync); -- } else { -- if (may_queue != ELV_MQUEUE_MUST -- && !ioc_batching(q, ioc)) { -- /* -- * The queue is full and the allocating -- * process is not a "batcher", and not -- * exempted by the IO scheduler -- */ -- goto out; -- } -- } -- } -- blk_set_queue_congested(q, is_sync); -- } -- -- /* -- * Only allow batching queuers to allocate up to 50% over the defined -- * limit of requests, otherwise we could have thousands of requests -- * allocated with any setting of ->nr_requests -- */ -- if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) -- goto out; -- - rl->count[is_sync]++; - rl->starved[is_sync] = 0; - -@@ -829,15 +759,6 @@ rq_starved: - goto out; - } - -- /* -- * ioc may be NULL here, and ioc_batching will be false. That's -- * OK, if the queue is under the request limit then requests need -- * not count toward the nr_batch_requests limit. There will always -- * be some limit enforced by BLK_BATCH_TIME. -- */ -- if (ioc_batching(q, ioc)) -- ioc->nr_batch_requests--; -- - trace_block_getrq(q, bio, rw_flags & 1); - out: - return rq; -@@ -858,7 +779,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, - rq = get_request(q, rw_flags, bio, GFP_NOIO); - while (!rq) { - DEFINE_WAIT(wait); -- struct io_context *ioc; - struct request_list *rl = &q->rq; - - prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, -@@ -870,15 +790,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, - spin_unlock_irq(q->queue_lock); - io_schedule(); - -- /* -- * After sleeping, we become a "batching" process and -- * will be able to allocate at least one request, and -- * up to a big batch of them for a small period time. -- * See ioc_batching, ioc_set_batching -- */ -- ioc = current_io_context(GFP_NOIO, q->node); -- ioc_set_batching(q, ioc); -- - spin_lock_irq(q->queue_lock); - finish_wait(&rl->wait[is_sync], &wait); - -diff --git a/block/blk-ioc.c b/block/blk-ioc.c -index d22c4c5..49beb97 100644 ---- a/block/blk-ioc.c -+++ b/block/blk-ioc.c -@@ -92,8 +92,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node) - spin_lock_init(&ret->lock); - ret->ioprio_changed = 0; - ret->ioprio = 0; -- ret->last_waited = 0; /* doesn't matter... 
*/ -- ret->nr_batch_requests = 0; /* because this is 0 */ - INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); - INIT_HLIST_HEAD(&ret->cic_list); - ret->ioc_data = NULL; -diff --git a/block/blk-settings.c b/block/blk-settings.c -index a234f4b..4ce5d80 100644 ---- a/block/blk-settings.c -+++ b/block/blk-settings.c -@@ -161,7 +161,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) - q->make_request_fn = mfn; - blk_queue_dma_alignment(q, 511); - blk_queue_congestion_threshold(q); -- q->nr_batching = BLK_BATCH_REQ; - - q->unplug_thresh = 4; /* hmm */ - q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */ -diff --git a/block/blk.h b/block/blk.h -index d6b911a..709e351 100644 ---- a/block/blk.h -+++ b/block/blk.h -@@ -1,12 +1,6 @@ - #ifndef BLK_INTERNAL_H - #define BLK_INTERNAL_H - --/* Amount of time in which a process may batch requests */ --#define BLK_BATCH_TIME (HZ/50UL) -- --/* Number of requests a "batching" process may submit */ --#define BLK_BATCH_REQ 32 -- - extern struct kmem_cache *blk_requestq_cachep; - extern struct kobj_type blk_queue_ktype; - -diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c -index dd318ff..2ad9087 100644 ---- a/fs/btrfs/volumes.c -+++ b/fs/btrfs/volumes.c -@@ -164,7 +164,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) - unsigned long num_sync_run; - unsigned long batch_run = 0; - unsigned long limit; -- unsigned long last_waited = 0; - int force_reg = 0; - - bdi = blk_get_backing_dev_info(device->bdev); -@@ -279,39 +278,6 @@ loop_lock: - */ - if (pending && bdi_write_congested(bdi) && batch_run > 8 && - fs_info->fs_devices->open_devices > 1) { -- struct io_context *ioc; -- -- ioc = current->io_context; -- -- /* -- * the main goal here is that we don't want to -- * block if we're going to be able to submit -- * more requests without blocking. -- * -- * This code does two great things, it pokes into -- * the elevator code from a filesystem _and_ -- * it makes assumptions about how batching works. -- */ -- if (ioc && ioc->nr_batch_requests > 0 && -- time_before(jiffies, ioc->last_waited + HZ/50UL) && -- (last_waited == 0 || -- ioc->last_waited == last_waited)) { -- /* -- * we want to go through our batch of -- * requests and stop. 
So, we copy out -- * the ioc->last_waited time and test -- * against it before looping -- */ -- last_waited = ioc->last_waited; -- if (need_resched()) { -- if (num_sync_run) { -- blk_run_backing_dev(bdi, NULL); -- num_sync_run = 0; -- } -- cond_resched(); -- } -- continue; -- } - spin_lock(&device->io_lock); - requeue_list(pending_bios, pending, tail); - device->running_pending = 1; -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 2c54906..2fc7917 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -326,7 +326,6 @@ struct request_queue - unsigned long nr_requests; /* Max # of requests */ - unsigned int nr_congestion_on; - unsigned int nr_congestion_off; -- unsigned int nr_batching; - - void *dma_drain_buffer; - unsigned int dma_drain_size; -diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h -index 64d5291..e8c9165 100644 ---- a/include/linux/iocontext.h -+++ b/include/linux/iocontext.h -@@ -45,12 +45,6 @@ struct io_context { - unsigned short cgroup_changed; - #endif - -- /* -- * For request batching -- */ -- int nr_batch_requests; /* Number of requests left in the batch */ -- unsigned long last_waited; /* Time last woken after wait for request */ -- - struct radix_tree_root radix_root; - struct hlist_head cic_list; - void *ioc_data; --- -1.7.1.426.gb436 - -EOF - ;; - 76241c12f6a730241b9fa6a795dff55f826ce391) - cat <<EOF rq; - -- if (unlikely(rl->rq_pool)) -+ if (unlikely(rl->rq_pool[0])) - return 0; - - rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; -@@ -476,11 +476,17 @@ static int blk_init_free_list(struct request_queue *q) - init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); - init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); - -- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, -+ rl->rq_pool[0] = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, request_cachep, q->node); -+ if (!rl->rq_pool[0]) -+ return -ENOMEM; - -- if (!rl->rq_pool) -+ rl->rq_pool[1] = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, -+ mempool_free_slab, request_cachep, q->node); -+ if (!rl->rq_pool[1]) { -+ mempool_destroy(rl->rq_pool[0]); - return -ENOMEM; -+ } - - return 0; - } -@@ -644,16 +650,21 @@ int blk_get_queue(struct request_queue *q) - - static inline void blk_free_request(struct request_queue *q, struct request *rq) - { -+ const bool is_sync = rq_is_sync(rq) != 0; -+ - if (rq->cmd_flags & REQ_ELVPRIV) - elv_put_request(q, rq); -- mempool_free(rq, q->rq.rq_pool); -+ -+ mempool_free(rq, q->rq.rq_pool[is_sync]); - } - - static struct request * - blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) - { -- struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); -+ const bool is_sync = rw_is_sync(flags) != 0; -+ struct request *rq; - -+ rq = mempool_alloc(q->rq.rq_pool[is_sync], gfp_mask); - if (!rq) - return NULL; - -@@ -663,7 +674,7 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) - - if (priv) { - if (unlikely(elv_set_request(q, rq, gfp_mask))) { -- mempool_free(rq, q->rq.rq_pool); -+ mempool_free(rq, q->rq.rq_pool[is_sync]); - return NULL; - } - rq->cmd_flags |= REQ_ELVPRIV; -diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c -index 0749b89..ee44ce5 100644 ---- a/block/blk-sysfs.c -+++ b/block/blk-sysfs.c -@@ -460,8 +460,10 @@ static void blk_release_queue(struct kobject *kobj) - - blk_sync_queue(q); - -- if (rl->rq_pool) -- mempool_destroy(rl->rq_pool); -+ if (rl->rq_pool[0]) -+ mempool_destroy(rl->rq_pool[0]); -+ if (rl->rq_pool[1]) -+
mempool_destroy(rl->rq_pool[1]); - - if (q->queue_tags) - __blk_queue_free_tags(q); -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 2fc7917..858235d 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -45,7 +45,7 @@ struct request_list { - int count[2]; - int starved[2]; - int elvpriv; -- mempool_t *rq_pool; -+ mempool_t *rq_pool[2]; - wait_queue_head_t wait[2]; - }; - --- -1.7.1.426.gb436 - -EOF - ;; - 35f2046ac858ca165a8aba477c9236e53a8dbffa) - cat <<EOF count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; -- rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; - rl->elvpriv = 0; - init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); - init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); -@@ -711,9 +710,6 @@ static void freed_request(struct request_queue *q, int sync, int priv) - rl->elvpriv--; - - __freed_request(q, sync); -- -- if (unlikely(rl->starved[sync ^ 1])) -- __freed_request(q, sync ^ 1); - } - - /* -@@ -731,10 +727,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags, - - may_queue = elv_may_queue(q, rw_flags); - if (may_queue == ELV_MQUEUE_NO) -- goto rq_starved; -+ goto out; - - rl->count[is_sync]++; -- rl->starved[is_sync] = 0; - - priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); - if (priv) -@@ -755,18 +750,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags, - */ - spin_lock_irq(q->queue_lock); - freed_request(q, is_sync, priv); -- -- /* -- * in the very unlikely event that allocation failed and no -- * requests for this direction was pending, mark us starved -- * so that freeing of a request in the other direction will -- * notice us. another possible fix would be to split the -- * rq mempool into READ and WRITE -- */ --rq_starved: -- if (unlikely(rl->count[is_sync] == 0)) -- rl->starved[is_sync] = 1; -- - goto out; - } - -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 858235d..089b8a2 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -39,11 +39,10 @@ typedef void (rq_end_io_fn)(struct request *, int); - - struct request_list { - /* -- * count[], starved[], and wait[] are indexed by -+ * count[], and wait[] are indexed by - * BLK_RW_SYNC/BLK_RW_ASYNC - */ - int count[2]; -- int starved[2]; - int elvpriv; - mempool_t *rq_pool[2]; - wait_queue_head_t wait[2]; --- -1.7.1.426.gb436 - -EOF - ;; - 38bb177765247024dad4b70a2abe0044d0574998) - cat <<EOF rq; - const bool is_sync = rw_is_sync(rw_flags) != 0; -+ const bool drop_lock = (gfp_mask & __GFP_WAIT) != 0; - int may_queue, priv; - - may_queue = elv_may_queue(q, rw_flags); -@@ -737,7 +736,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags, - - if (blk_queue_io_stat(q)) - rw_flags |= REQ_IO_STAT; -- spin_unlock_irq(q->queue_lock); -+ -+ if (drop_lock) -+ spin_unlock_irq(q->queue_lock); - - rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); - if (unlikely(!rq)) { -@@ -748,12 +749,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags, - * Allocating task should really be put onto the front of the - * wait queue, but this is pretty rare. - */ -- spin_lock_irq(q->queue_lock); -+ if (drop_lock) -+ spin_lock_irq(q->queue_lock); -+ - freed_request(q, is_sync, priv); - goto out; - } - - trace_block_getrq(q, bio, rw_flags & 1); -+ -+ if (drop_lock) -+ spin_lock_irq(q->queue_lock); - out: - return rq; - } -@@ -762,7 +768,7 @@ out: - * No available requests for this queue, unplug the device and wait for some - * requests to become available.
- * -- * Called with q->queue_lock held, and returns with it unlocked. -+ * Called with q->queue_lock held. - */ - static struct request *get_request_wait(struct request_queue *q, int rw_flags, - struct bio *bio) -@@ -770,7 +776,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, - const bool is_sync = rw_is_sync(rw_flags) != 0; - struct request *rq; - -- rq = get_request(q, rw_flags, bio, GFP_NOIO); -+ rq = get_request(q, rw_flags, bio, GFP_ATOMIC); - while (!rq) { - DEFINE_WAIT(wait); - struct request_list *rl = &q->rq; -@@ -800,15 +806,13 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) - BUG_ON(rw != READ && rw != WRITE); - - spin_lock_irq(q->queue_lock); -- if (gfp_mask & __GFP_WAIT) { -+ -+ if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw, NULL); -- } else { -+ else - rq = get_request(q, rw, NULL, gfp_mask); -- if (!rq) -- spin_unlock_irq(q->queue_lock); -- } -- /* q->queue_lock is unlocked at this point */ - -+ spin_unlock_irq(q->queue_lock); - return rq; - } - EXPORT_SYMBOL(blk_get_request); -@@ -1200,8 +1204,7 @@ get_rq: - rw_flags |= REQ_SYNC; - - /* -- * Grab a free request. This is might sleep but can not fail. -- * Returns with the queue unlocked. -+ * Grab a free request. - */ - req = get_request_wait(q, rw_flags, bio); - -@@ -1213,7 +1216,6 @@ get_rq: - */ - init_request_from_bio(req, bio); - -- spin_lock_irq(q->queue_lock); - if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || - bio_flagged(bio, BIO_CPU_AFFINE)) - req->cpu = blk_cpu_to_group(smp_processor_id()); -diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c -index f65c6f0..3d8635d 100644 ---- a/block/cfq-iosched.c -+++ b/block/cfq-iosched.c -@@ -2543,12 +2543,12 @@ static void cfq_put_queue(struct cfq_queue *cfqq) - cfq_put_cfqg(orig_cfqg); - } - -+typedef void (cic_call_fn)(struct io_context *, struct cfq_io_context *); -+ - /* - * Must always be called with the rcu_read_lock() held - */ --static void --__call_for_each_cic(struct io_context *ioc, -- void (*func)(struct io_context *, struct cfq_io_context *)) -+static void __call_for_each_cic(struct io_context *ioc, cic_call_fn *func) - { - struct cfq_io_context *cic; - struct hlist_node *n; -@@ -2560,9 +2560,7 @@ __call_for_each_cic(struct io_context *ioc, - /* - * Call func for each cic attached to this ioc. 
- */ --static void --call_for_each_cic(struct io_context *ioc, -- void (*func)(struct io_context *, struct cfq_io_context *)) -+static void call_for_each_cic(struct io_context *ioc, cic_call_fn *func) - { - rcu_read_lock(); - __call_for_each_cic(ioc, func); -@@ -2787,13 +2785,10 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) - { - struct cfq_data *cfqd = cic_to_cfqd(cic); - struct cfq_queue *cfqq; -- unsigned long flags; - - if (unlikely(!cfqd)) - return; - -- spin_lock_irqsave(cfqd->queue->queue_lock, flags); -- - cfqq = cic->cfqq[BLK_RW_ASYNC]; - if (cfqq) { - struct cfq_queue *new_cfqq; -@@ -2808,8 +2803,6 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) - cfqq = cic->cfqq[BLK_RW_SYNC]; - if (cfqq) - cfq_mark_cfqq_prio_changed(cfqq); -- -- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); - } - - static void cfq_ioc_set_ioprio(struct io_context *ioc) -@@ -3057,11 +3050,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, - - radix_tree_preload_end(); - -- if (!ret) { -- spin_lock_irqsave(cfqd->queue->queue_lock, flags); -+ if (!ret) - list_add(&cic->queue_list, &cfqd->cic_list); -- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); -- } - } - - if (ret) -@@ -3081,8 +3071,6 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) - struct io_context *ioc = NULL; - struct cfq_io_context *cic; - -- might_sleep_if(gfp_mask & __GFP_WAIT); - - ioc = get_io_context(gfp_mask, cfqd->queue->node); - if (!ioc) - return NULL; -@@ -3633,14 +3621,10 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) - const int rw = rq_data_dir(rq); - const bool is_sync = rq_is_sync(rq); - struct cfq_queue *cfqq; -- unsigned long flags; - - might_sleep_if(gfp_mask & __GFP_WAIT); - - cic = cfq_get_io_context(cfqd, gfp_mask); -- -- spin_lock_irqsave(q->queue_lock, flags); -- - if (!cic) - goto queue_fail; - -@@ -3673,8 +3657,6 @@ new_queue: - cfqq->allocated[rw]++; - atomic_inc(&cfqq->ref); - -- spin_unlock_irqrestore(q->queue_lock, flags); -- - rq->elevator_private = cic; - rq->elevator_private2 = cfqq; - rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg); -@@ -3685,7 +3667,6 @@ queue_fail: - put_io_context(cic->ioc); - - cfq_schedule_dispatch(cfqd); -- spin_unlock_irqrestore(q->queue_lock, flags); - cfq_log(cfqd, "set_request fail"); - return 1; - } --- -1.7.1.426.gb436 - -EOF - ;; - *) - echo ERROR - ;; - esac -} - - -# Argument processing - -if [ $# != 1 ]; then - usage - exit 1 -fi - -if [ "${1#[0-9]*.[0-9]*.[0-9]*.[0-9]*}" != "$1" ]; then - kernel_version="${1%.[0-9]*}" - patch_level="${1#${kernel_version}.}" -else - kernel_version="$1" -fi - -# Actual kernel source generation - -if [ -e linux-${kernel_version} -o -e linux-$1 ]; then - echo "Error: directory not clean." - exit 1 -fi - -echo "Extracting kernel sources ..." -tar xaf $TARBALLDIR/linux-${kernel_version}.tar.bz2 || exit $? -if [ "${patch_level}" != "" ]; then - mv -i linux-${kernel_version} linux-$1 || exit $? -fi -cd linux-$1 || exit $? - -if [ "${patch_level}" != "" ]; then - patchfile="patch-${kernel_version}.${patch_level}" - echo "Applying ${patchfile} ..."
- bzip2 -cd <${TARBALLDIR}/${patchfile}.bz2 | \ - patch -p1 -s -fi -if [ "${kernel_version}" "<" "2.6.36" ]; then - # IB/srp: Use print_hex_dump() - # IB/srp: Make receive buffer handling more robust - # IB/srp: Export req_lim via sysfs - for commit in \ - 7a7008110b94dfaa90db4b0cc5b0c3f964c80506 \ - c996bb47bb419b7c2f75499e11750142775e5da9 \ - 89de74866b846cc48780fda3de7fd223296aaca9 - do - echo "Applying patch $commit ..." - get_2_6_36_patch $commit | patch -p1 -s - done -fi -# IB/srp: Preparation for transmit ring response allocation -# IB/srp: Implement SRP_CRED_REQ and SRP_AER_REQ -# IB/srp: Eliminate two forward declarations -# IB/srp: Reduce number of BUSY conditions -# IB/srp: Introduce list_first_entry() -for p in \ - 143381 \ - 143391 \ - 143401 \ - 143411 \ - 143421 -do - echo "Applying patch $p ..." - get_2_6_37_patch $p | patch -p1 -s -done - -echo "Applying locking-per-lun patch ..." -get_locking_per_lun_patch | patch -p1 -s - -echo "Applying Jens' block layer optimization patches ..." -if [ "${kernel_version}" "<" "2.6.38" ]; then - # block: kill request batching - # block: add separate sync/async rq allocation mempools - # block: get rid of request_list->starved[] - # block: optimize rq allocation path for less queue locking - for commit in \ - 5a00f237eb167a5e98d5f3bb56e11e4da406a5bc \ - 76241c12f6a730241b9fa6a795dff55f826ce391 \ - 35f2046ac858ca165a8aba477c9236e53a8dbffa \ - 38bb177765247024dad4b70a2abe0044d0574998 - do - echo "Applying patch $commit ..." - get_block_layer_patch $commit | patch -p1 -s - done -fi diff --git a/scripts/generate-scst-patch b/scripts/generate-scst-patch new file mode 100755 index 000000000..ddcb6d68d --- /dev/null +++ b/scripts/generate-scst-patch @@ -0,0 +1,26 @@ +#!/bin/bash +# +# A script to prepare a patch for posting on scst-devel@sourceforge.net. +# +# Copyright (C) 2012 Chetan Loke +# Copyright (C) 2012 Bart Van Assche +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation, version 2 +# of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +echo "Patch description:" +echo "Your description goes here..." 
+echo +echo "Signed-off-by: Write your name here " +echo --- +/usr/bin/svn diff "$@" | /usr/bin/diffstat +echo +/usr/bin/svn diff -x -p "$@" +echo diff --git a/scripts/rebuild-rhel-kernel-rpm b/scripts/rebuild-rhel-kernel-rpm index 7061657b0..d292e4112 100755 --- a/scripts/rebuild-rhel-kernel-rpm +++ b/scripts/rebuild-rhel-kernel-rpm @@ -58,7 +58,7 @@ case "$distro" in if [ $releasevermajor = 5 ]; then srpm_url=("http://vault.centos.org/${releasever}/os/SRPMS" "http://vault.centos.org/${releasever}/updates/SRPMS") else - srpm_url=("http://mirror.centos.org/centos/${releasever}/os/SRPMS/Packages" "http://mirror.centos.org/centos/${releasever}/updates/SRPMS") + srpm_url=("http://vault.centos.org/${releasever}/os/Source/SPackages" "http://vault.centos.org/${releasever}/updates/Source/SPackages") fi ;; "Red Hat Enterprise Linux"*) @@ -121,17 +121,6 @@ log "Creating directory ${rpmbuild_dir}" mkdir -p ${rpmbuild_dir}/{BUILD,RPMS,SOURCES,SPECS,SRPMS} -log "Updating ~/.rpmmacros" - -if [ -e ~/.rpmmacros ]; then - cp ~/.rpmmacros ~/.rpmmacros.tmp -else - touch ~/.rpmmacros.tmp -fi -{ cat ~/.rpmmacros.tmp | grep -v '^%_topdir '; \ - echo "%_topdir ${rpmbuild_dir}"; } > ~/.rpmmacros -rm -f ~/.rpmmacros.tmp - log "Installing, unpacking and preparing kernel source files" mkdir -p ${downloaddir} @@ -158,12 +147,16 @@ sudo yum-builddep -q -y ${downloaddir}/${kernel_src_rpm} log "Installing kernel sources in ${rpmbuild_dir}" cd ${rpmbuild_dir} -{ rpm -i ${downloaddir}/${kernel_src_rpm} 2>&1 \ - | grep -v ' does not exist'; } +rpm --define="%_topdir ${rpmbuild_dir}" -i ${downloaddir}/${kernel_src_rpm} 2>&1 \ + | grep -v ' does not exist' cd SPECS -{ rpmbuild -bp --target=${arch} kernel*.spec; \ - rc=$?; if [ rc != 0 ]; then exit $rc; fi; } 2>&1 \ - | tee prep-err.log +{ + rpmbuild --define="%_topdir ${rpmbuild_dir}" -bp --target=${arch} kernel*.spec + rc=$? + if [ $rc != 0 ]; then + exit $rc + fi +} 2>&1 | tee prep-err.log log "Copying SCST patches to the SOURCES directory" @@ -304,8 +297,13 @@ fi log "Rebuilding kernel" cd ${rpmbuild_dir}/SPECS -{ rpmbuild -bb --target=${arch} --with baseonly --with firmware --without kabichk kernel*.spec; rc=$?; if [ $rc != 0 ]; then exit $rc; fi; } 2>&1 \ - | tee build.log +{ + rpmbuild --define="%_topdir ${rpmbuild_dir}" -bb --target=${arch} --with baseonly --with firmware --without kabichk kernel*.spec + rc=$? + if [ $rc != 0 ]; then + exit $rc + fi +} 2>&1 | tee build.log log "Ready. You can now install the freshly built kernel RPM as follows:\n"\ diff --git a/scripts/run-regression-tests b/scripts/run-regression-tests index 8af731e6e..92527c52c 100755 --- a/scripts/run-regression-tests +++ b/scripts/run-regression-tests @@ -301,25 +301,28 @@ function run_sparse { shift echo "Running sparse on the patched kernel in ${subdir} $@ ..." - ( - cd "${outputdir}/linux-$k" \ - && make -s prepare \ - && make -s scripts \ - && if grep -q '^CONFIG_PPC=y$' .config; then LC_ALL=C make -k M=arch/powerpc/lib; fi \ - && LC_ALL=C make -k C=2 CF=-D__CHECK_ENDIAN__ M="${subdir}" "$@" - ) &> "${outputfile}" - local errors=$(grep -c ' error:' "${outputfile}") - local warnings=$(grep -c ' warning:' "${outputfile}") - echo "${errors} errors / ${warnings} warnings."
- cat "${outputfile}" \ - | grep -E 'warning:|error:' \ - | sed -e 's/^[^ ]*:[^ ]*:[^ ]*: //' \ - -e "s/context imbalance in '[^']*':/context imbalance in :/g" \ - -e "s/context problem in '[^']*': '[^']*'/context problem in : /g" \ - -e "s/function '[^']*'/function/g" \ - -e "s/symbol '[^']*'/symbol/g" \ - | sort \ - | uniq -c + if (cd "${outputdir}/linux-$k" \ + && make -s prepare \ + && make -s scripts \ + && if grep -q '^CONFIG_PPC=y$' .config; then LC_ALL=C make -k M=arch/powerpc/lib; fi \ + && LC_ALL=C make -k C=2 CF=-D__CHECK_ENDIAN__ M="${subdir}" "$@" + ) &> "${outputfile}" + then + local errors=$(grep -c ' error:' "${outputfile}") + local warnings=$(grep -c ' warning:' "${outputfile}") + echo "${errors} errors / ${warnings} warnings." + cat "${outputfile}" \ + | grep -E 'warning:|error:' \ + | sed -e 's/^[^ ]*:[^ ]*:[^ ]*: //' \ + -e "s/context imbalance in '[^']*':/context imbalance in :/g" \ + -e "s/context problem in '[^']*': '[^']*'/context problem in : /g" \ + -e "s/function '[^']*'/function/g" \ + -e "s/symbol '[^']*'/symbol/g" \ + | sort \ + | uniq -c + else + echo FAILED + fi return 0 }
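
Note on the rebuild-rhel-kernel-rpm hunks above: instead of persisting "%_topdir ${rpmbuild_dir}" in ~/.rpmmacros, the script now passes the macro with --define on each rpm/rpmbuild invocation, so the override is scoped to a single command and the user's rpm configuration is left alone. The following standalone sketch illustrates that pattern; the paths, the example.src.rpm name and the set -o pipefail line are illustrative assumptions, not taken from the script itself.

#!/bin/bash
# Sketch: unpack and prep an SRPM in a private %_topdir without touching
# ~/.rpmmacros. With pipefail, a failing rpmbuild also fails the pipeline
# below instead of being masked by tee's exit status.
set -o pipefail

rpmbuild_dir="$HOME/tmp/rpmbuild"          # hypothetical scratch directory
mkdir -p "${rpmbuild_dir}"/{BUILD,RPMS,SOURCES,SPECS,SRPMS}

# --define applies only to this invocation; concurrent builds can each use
# their own %_topdir.
rpm --define="%_topdir ${rpmbuild_dir}" -i "$HOME/tmp/example.src.rpm"

{
  # Run the %prep stage of the unpacked spec file in the same private tree.
  rpmbuild --define="%_topdir ${rpmbuild_dir}" -bp --target="$(uname -m)" \
      "${rpmbuild_dir}"/SPECS/*.spec
  rc=$?
  # Capture rpmbuild's status before tee runs, as the script above does,
  # and propagate it out of the brace group.
  if [ $rc != 0 ]; then
    exit $rc
  fi
} 2>&1 | tee "${rpmbuild_dir}/prep.log"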