diff --git a/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt b/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt
index cee8e5026..a2549013a 100644
--- a/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt
+++ b/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt
@@ -40,7 +40,6 @@ work.
 
 cd /usr/src/linux-2.6.39-gentoo-r3
 patch -p1 < /root/scst/iscsi-scst/kernel/patches/put_page_callback-2.6.39.patch
-patch -p1 < /root/scst/scst/kernel/scst_exec_req_fifo-2.6.39.patch
 
 make clean
diff --git a/iscsi-scst/doc/iscsi-scst-howto.txt b/iscsi-scst/doc/iscsi-scst-howto.txt
index 97271f74f..7b9b5fffc 100644
--- a/iscsi-scst/doc/iscsi-scst-howto.txt
+++ b/iscsi-scst/doc/iscsi-scst-howto.txt
@@ -21,7 +21,6 @@ the example below):
 
 cd /usr/src/kernels/linux-2.6.38.8
 patch -p1 < $HOME/scst/iscsi-scst/kernel/patches/put_page_callback-2.6.38.patch
-patch -p1 < $HOME/scst/scst/kernel/scst_exec_req_fifo-2.6.38.patch
 
 make clean
 
 Next, build and install the kernel:
diff --git a/qla2x00t/doc/qla2x00t-howto.html b/qla2x00t/doc/qla2x00t-howto.html
index 66b418723..4ed582fd4 100644
--- a/qla2x00t/doc/qla2x00t-howto.html
+++ b/qla2x00t/doc/qla2x00t-howto.html
@@ -80,13 +80,6 @@ Instructions for obtaining a distribution-specific kernel source tree vary. An e
 [root@proj src ]# tar xjf linux-source-`uname -r`.tar.bz2
-
  • -Patch the kernel that has just been downloaded:
 [root@proj src ]# ln -s linux-3.11 linux
-[root@proj src ]# cd linux
-[root@proj linux ]# patch -p1 < /root/scst/scst/kernel/scst_exec_req_fifo-3.11.patch
-
  • The next step is to configure the kernel:
    [root@proj linux ]# pwd
     /usr/src/linux
    diff --git a/scripts/generate-kernel-patch b/scripts/generate-kernel-patch
    index 90d96eabd..dbcf623fe 100755
    --- a/scripts/generate-kernel-patch
    +++ b/scripts/generate-kernel-patch
    @@ -282,6 +282,7 @@ for p in scst/kernel/*-${kver}.patch \
     		echo iscsi-scst/kernel/patches/*-${kver}.patch;
     	   fi)
     do
    +  [ -e "$p" ] || continue
       # Exclude the put_page_callback patch when command-line option -u has been
       # specified since the current approach is not considered acceptable for
       # upstream kernel inclusion. See also http://lkml.org/lkml/2008/12/11/213.
    diff --git a/scripts/rebuild-rhel-kernel-rpm b/scripts/rebuild-rhel-kernel-rpm
    index df8569a2f..5e92901df 100755
    --- a/scripts/rebuild-rhel-kernel-rpm
    +++ b/scripts/rebuild-rhel-kernel-rpm
    @@ -220,11 +220,7 @@ cd SPECS
     log "Copying SCST patches to the SOURCES directory"
     
     cd ${rpmbuild_dir}/SOURCES
    -copy_best_matching_patch $scst_dir/scst/kernel/rhel/scst_exec_req_fifo scst_exec_req_fifo.patch ||
    -{
    -    echo "No matching scst_exec_req_fifo patch found for kernel version $kver";
    -    exit 1;
    -}
    +copy_best_matching_patch $scst_dir/scst/kernel/rhel/scst_exec_req_fifo scst_exec_req_fifo.patch
     copy_best_matching_patch $scst_dir/iscsi-scst/kernel/patches/rhel/put_page_callback put_page_callback.patch ||
     {
         echo "No matching put_page_callback patch found for kernel version $kver";
    @@ -300,7 +296,7 @@ diff -u SPECS/kernel.spec{.orig,}
      Source82: config-s390x-debug
      Source83: config-s390x-debug-rhel
      
    -+Patch200: scst_exec_req_fifo.patch
    ++#Patch200: scst_exec_req_fifo.patch
     +Patch201: put_page_callback.patch
     +
      # empty final patch file to facilitate testing of kernel patches
    @@ -310,7 +306,7 @@ diff -u SPECS/kernel.spec{.orig,}
      # Dynamically generate kernel .config files from config-* files
      make -f %{SOURCE20} VERSION=%{version} configs
      
    -+ApplyPatch scst_exec_req_fifo.patch
    ++#ApplyPatch scst_exec_req_fifo.patch
     +ApplyPatch put_page_callback.patch
     +
      ApplyOptionalPatch linux-kernel-test.patch
    @@ -339,7 +335,7 @@ diff -u SPECS/kernel.spec{.orig,}
      Source82: config-generic
      Source83: config-x86_64-debug-rhel
      
    -+Patch200: scst_exec_req_fifo.patch
    ++#Patch200: scst_exec_req_fifo.patch
     +Patch201: put_page_callback.patch
     +
      # empty final patch file to facilitate testing of kernel patches
    @@ -349,7 +345,7 @@ diff -u SPECS/kernel.spec{.orig,}
      # Dynamically generate kernel .config files from config-* files
      make -f %{SOURCE20} VERSION=%{version} configs
      
    -+ApplyPatch scst_exec_req_fifo.patch
    ++#ApplyPatch scst_exec_req_fifo.patch
     +ApplyPatch put_page_callback.patch
     +
      ApplyOptionalPatch linux-kernel-test.patch
    @@ -375,7 +371,7 @@ diff -u SPECS/kernel.spec{.orig,}
      Source85: config-powerpc64-debug-rhel
      Source86: config-s390x-debug-rhel
      
    -+Patch200: scst_exec_req_fifo.patch
    ++#Patch200: scst_exec_req_fifo.patch
     +Patch201: put_page_callback.patch
     +
      # empty final patch file to facilitate testing of kernel patches
    @@ -385,7 +381,7 @@ diff -u SPECS/kernel.spec{.orig,}
      # Dynamically generate kernel .config files from config-* files
      make -f %{SOURCE20} VERSION=%{version} configs
      
    -+ApplyPatch scst_exec_req_fifo.patch
    ++#ApplyPatch scst_exec_req_fifo.patch
     +ApplyPatch put_page_callback.patch
     +
      ApplyOptionalPatch linux-kernel-test.patch
    @@ -418,7 +414,7 @@ patch -p1 ${rpmbuild_dir}/SPECS/kernel.spec <<'EOF' || exit $?
      Source2000: cpupower.service
      Source2001: cpupower.config
      
    -+Patch200: scst_exec_req_fifo.patch
    ++#Patch200: scst_exec_req_fifo.patch
     +Patch201: put_page_callback.patch
     +
      # empty final patch to facilitate testing of kernel patches
    @@ -428,7 +424,7 @@ patch -p1 ${rpmbuild_dir}/SPECS/kernel.spec <<'EOF' || exit $?
      # Drop some necessary files from the source dir into the buildroot
      cp $RPM_SOURCE_DIR/kernel-%{version}-*.config .
      
    -+ApplyPatch scst_exec_req_fifo.patch
    ++#ApplyPatch scst_exec_req_fifo.patch
     +ApplyPatch put_page_callback.patch
     +
      ApplyOptionalPatch linux-kernel-test.patch
    diff --git a/scst/README b/scst/README
    index ac19e55ce..b7eff48e0 100644
    --- a/scst/README
    +++ b/scst/README
    @@ -70,27 +70,13 @@ following patches for the kernel in the "kernel" subdirectory. All of
     them are optional, so, if you don't need the corresponding
     functionality, you may not apply them.
     
    -1. scst_exec_req_fifo-2.6.X.patch. This patch is necessary for
    -pass-through dev handlers, because in the mainstream kernels
    -scsi_do_req()/scsi_execute_async() work in LIFO order, instead of
    -expected and required FIFO. So SCST needs new functions
    -scsi_do_req_fifo() or scsi_execute_async_fifo() to be added in the
    -kernel. This patch does that. You may not patch the kernel if you don't
    -need the pass-through support. Alternatively, you can define
    -CONFIG_SCST_STRICT_SERIALIZING compile option during the compilation
    -(see description below). Unfortunately, the CONFIG_SCST_STRICT_SERIALIZING
    -trick doesn't work on kernels starting from 2.6.30, because those
    -kernels don't have the required functionality (scsi_execute_async())
    -anymore. So, on them to have pass-through working you have to apply
    -scst_exec_req_fifo-2.6.X.patch.
    -
    -2. readahead-2.6.X.patch. This patch fixes problem in Linux readahead
    +1. readahead-2.6.X.patch. This patch fixes problem in Linux readahead
     subsystem and greatly improves performance for software RAIDs. See
     http://sourceforge.net/mailarchive/forum.php?thread_name=a0272b440906030714g67eabc5k8f847fb1e538cc62%40mail.gmail.com&forum_name=scst-devel
     thread for more details. It is included in the mainstream kernels 2.6.33
     and 2.6.32.11.
     
    -3. readahead-context-2.6.X.patch. This is backported from 2.6.31 version
    +2. readahead-context-2.6.X.patch. This is backported from 2.6.31 version
     of the context readahead patch http://lkml.org/lkml/2009/4/12/9, big
     thanks to Wu Fengguang. This is a performance improvement patch. It is
     included in the mainstream kernel 2.6.31.
    diff --git a/scst/include/scst.h b/scst/include/scst.h
    index a79985bce..36207ed32 100644
    --- a/scst/include/scst.h
    +++ b/scst/include/scst.h
    @@ -4856,7 +4856,7 @@ void scst_init_threads(struct scst_cmd_threads *cmd_threads);
     void scst_deinit_threads(struct scst_cmd_threads *cmd_threads);
     
     void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid);
    -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
     	void (*done)(void *data, char *sense, int result, int resid));
     #endif
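
Note: with SCSI_EXEC_REQ_FIFO_DEFINED gone, the scst_scsi_exec_async()
declaration above is visible on every kernel >= 2.6.30, whether or not the
(now deleted) block-layer patch was applied. Below is a minimal sketch of how
a pass-through execution path might drive it, assuming the opaque data
pointer carries the command itself and reusing scst_pass_through_cmd_done(),
whose signature matches the expected callback. sketch_exec_pass_through() is
a hypothetical name, not a function from the tree:

    #include <linux/errno.h>
    #include <linux/version.h>
    #include "scst.h"

    /* Illustrative sketch only: submit a pass-through command via the
     * prototype declared above; a real dev handler does much more setup. */
    static int sketch_exec_pass_through(struct scst_cmd *cmd)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
    	/* completion callback receives (data, sense, result, resid) */
    	return scst_scsi_exec_async(cmd, cmd, scst_pass_through_cmd_done);
    #else
    	return -ENOSYS;	/* no async pass-through exec on older kernels */
    #endif
    }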
    diff --git a/scst/kernel/rhel/scst_exec_req_fifo-2.6.32.patch b/scst/kernel/rhel/scst_exec_req_fifo-2.6.32.patch
    deleted file mode 100644
    index bcb3b02e2..000000000
    --- a/scst/kernel/rhel/scst_exec_req_fifo-2.6.32.patch
    +++ /dev/null
    @@ -1,529 +0,0 @@
    -diff -upkr linux-2.6.32/block/blk-map.c linux-2.6.32/block/blk-map.c
    ---- linux-2.6.32/block/blk-map.c	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/block/blk-map.c	2011-05-17 20:56:18.341812997 -0400
    -@@ -5,6 +5,7 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
- #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res = 0;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.32/include/linux/blkdev.h linux-2.6.32/include/linux/blkdev.h
    ---- linux-2.6.32/include/linux/blkdev.h	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/include/linux/blkdev.h	2009-12-16 07:21:35.000000000 -0500
    -@@ -708,6 +708,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -812,6 +814,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.32/include/linux/scatterlist.h linux-2.6.32/include/linux/scatterlist.h
    ---- linux-2.6.32/include/linux/scatterlist.h	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/include/linux/scatterlist.h	2009-12-16 07:21:35.000000000 -0500
    -@@ -3,6 +3,7 @@
    - 
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.32/lib/scatterlist.c linux-2.6.32/lib/scatterlist.c
    ---- linux-2.6.32/lib/scatterlist.c	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/lib/scatterlist.c	2009-12-16 07:21:35.000000000 -0500
    -@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
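
Note: the file deleted above exported sg_copy() for copying one SG vector
into another (see its kernel-doc). For reference, a hedged usage sketch
against that deleted 2.6.32-era API; the KM_USER0/KM_USER1 kmap types existed
in those kernels, and sketch_bounce_copy() is a hypothetical caller, not code
from the tree:

    #include <linux/scatterlist.h>

    /* Copy everything from src into dst, e.g. into a bounce list built
     * with sg_alloc_table(); the 0, 0 arguments mean no entry/byte limit,
     * so copying stops when sg_next() returns NULL on either vector.
     * Returns the number of bytes copied. */
    static int sketch_bounce_copy(struct scatterlist *dst,
    			      struct scatterlist *src)
    {
    	return sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);
    }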
    diff --git a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-121.patch b/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-121.patch
    deleted file mode 120000
    index 6a3acd053..000000000
    --- a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-121.patch
    +++ /dev/null
    @@ -1 +0,0 @@
    -../scst_exec_req_fifo-3.10.patch
    \ No newline at end of file
    diff --git a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-123.patch b/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-123.patch
    deleted file mode 100644
    index d60ddd1de..000000000
    --- a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-123.patch
    +++ /dev/null
    @@ -1,524 +0,0 @@
    -diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/block/blk-map.c ./block/blk-map.c
    ---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/block/blk-map.c	2014-07-16 20:25:31.000000000 +0200
    -+++ ./block/blk-map.c	2014-08-07 09:09:11.751302961 +0200
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/sched.h>
- #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/blkdev.h ./include/linux/blkdev.h
    ---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/blkdev.h	2014-07-16 20:25:31.000000000 +0200
    -+++ ./include/linux/blkdev.h	2014-08-07 09:09:11.751302961 +0200
    -@@ -719,6 +719,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -838,6 +840,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/scatterlist.h ./include/linux/scatterlist.h
    ---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/scatterlist.h	2014-07-16 20:25:31.000000000 +0200
    -+++ ./include/linux/scatterlist.h	2014-08-07 09:09:11.751302961 +0200
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -244,6 +245,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/lib/scatterlist.c ./lib/scatterlist.c
    ---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/lib/scatterlist.c	2014-07-16 20:25:31.000000000 +0200
    -+++ ./lib/scatterlist.c	2014-08-07 09:09:11.751302961 +0200
    -@@ -628,3 +628,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
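
Note: the RHEL 7 variant deleted above provided the same
blk_rq_map_kern_sg()/blk_rq_unmap_kern_sg() pair, minus the km_type
arguments. A hedged sketch of how an out-of-tree caller used that 3.10-era
API; sketch_submit_sg() is hypothetical, and per the kernel-doc above,
blk_rq_unmap_kern_sg() is only for error paths (on success the bios'
end_io handlers clean up when the request completes):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* Build and run a SCSI pass-through (REQ_TYPE_BLOCK_PC) request over
     * an existing scatterlist, using the deleted patch's exports. */
    static int sketch_submit_sg(struct request_queue *q, struct gendisk *disk,
    			    struct scatterlist *sgl, int nents)
    {
    	struct request *rq;
    	int err;

    	rq = blk_get_request(q, WRITE, GFP_KERNEL);	/* 3.10-era call */
    	if (rq == NULL)
    		return -ENOMEM;
    	rq->cmd_type = REQ_TYPE_BLOCK_PC;

    	err = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
    	if (err)
    		goto out_put;

    	/* CDB setup (rq->cmd, rq->cmd_len, rq->timeout) omitted; if a
    	 * step after a successful mapping failed, this is where
    	 * blk_rq_unmap_kern_sg(rq, err) would run. */

    	err = blk_execute_rq(q, disk, rq, 0);
    out_put:
    	blk_put_request(rq);
    	return err;
    }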
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.30.patch b/scst/kernel/scst_exec_req_fifo-2.6.30.patch
    deleted file mode 100644
    index 9b9cb78e1..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.30.patch
    +++ /dev/null
    @@ -1,529 +0,0 @@
    -diff -upkr linux-2.6.30/block/blk-map.c linux-2.6.30/block/blk-map.c
    ---- linux-2.6.30/block/blk-map.c	2009-06-09 23:05:27.000000000 -0400
    -+++ linux-2.6.30/block/blk-map.c	2011-05-17 21:03:29.661813000 -0400
    -@@ -5,6 +5,7 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
- #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -272,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = rq->data = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = rq->data = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = 0;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.30/include/linux/blkdev.h linux-2.6.30/include/linux/blkdev.h
    ---- linux-2.6.30/include/linux/blkdev.h	2009-06-09 23:05:27.000000000 -0400
    -+++ linux-2.6.30/include/linux/blkdev.h	2009-08-12 11:48:06.000000000 -0400
    -@@ -704,6 +704,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -807,6 +809,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.30/include/linux/scatterlist.h linux-2.6.30/include/linux/scatterlist.h
    ---- linux-2.6.30/include/linux/scatterlist.h	2009-06-09 23:05:27.000000000 -0400
    -+++ linux-2.6.30/include/linux/scatterlist.h	2009-08-12 11:50:02.000000000 -0400
    -@@ -3,6 +3,7 @@
    - 
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.30/lib/scatterlist.c linux-2.6.30/lib/scatterlist.c
    ---- linux-2.6.30/lib/scatterlist.c	2009-06-09 23:05:27.000000000 -0400
    -+++ linux-2.6.30/lib/scatterlist.c	2009-08-12 11:56:04.000000000 -0400
    -@@ -485,3 +485,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
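For readers tracing what is being removed: sg_copy() above advances through both vectors one fragment at a time, bounded by copy_len and nents_to_copy (0 meaning unlimited), and stops as soon as either chain runs out. The userspace sketch below reproduces that bounded walk with plain buffers; struct seg and seg_copy are illustrative stand-ins for struct scatterlist and the kernel helper, and the kmap_atomic/page arithmetic is deliberately omitted.

  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  struct seg {
      void *buf;          /* stands in for sg_page() plus sg->offset */
      size_t len;         /* sg->length */
      struct seg *next;   /* stands in for sg_next() */
  };

  /* Copy up to copy_len bytes (0 == no limit) from src chain to dst chain,
   * returning the number of bytes copied, like the removed sg_copy(). */
  static size_t seg_copy(struct seg *dst, struct seg *src, size_t copy_len)
  {
      size_t res = 0, dst_off = 0, src_off = 0;

      if (copy_len == 0)
          copy_len = (size_t)-1;      /* "copy all" */

      while (dst && src && copy_len) {
          size_t n = dst->len - dst_off;

          if (src->len - src_off < n)
              n = src->len - src_off;
          if (copy_len < n)
              n = copy_len;

          memcpy((char *)dst->buf + dst_off, (char *)src->buf + src_off, n);
          res += n;
          copy_len -= n;
          dst_off += n;
          src_off += n;

          if (dst_off == dst->len) {  /* advance like sg_next(dst_sg) */
              dst = dst->next;
              dst_off = 0;
          }
          if (src_off == src->len) {  /* advance like sg_next(src_sg) */
              src = src->next;
              src_off = 0;
          }
      }
      return res;
  }

  int main(void)
  {
      char a[4] = "abcd", b[3] = "efg";
      char out1[5] = {0}, out2[2] = {0};
      struct seg src2 = { b, sizeof(b), NULL };
      struct seg src1 = { a, sizeof(a), &src2 };
      struct seg dst2 = { out2, sizeof(out2), NULL };
      struct seg dst1 = { out1, sizeof(out1), &dst2 };

      /* Copies 7 bytes across uneven segment boundaries:
       * "abcde" lands in out1 and "fg" in out2. */
      printf("copied %zu bytes\n", seg_copy(&dst1, &src1, 0));
      return 0;
  }

Built with cc -std=c99, this prints "copied 7 bytes"; the segment boundaries of the two chains need not line up, which is exactly why the kernel version re-clamps n on every iteration.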
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.31.patch b/scst/kernel/scst_exec_req_fifo-2.6.31.patch
    deleted file mode 100644
    index 0d9c06e6d..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.31.patch
    +++ /dev/null
    @@ -1,529 +0,0 @@
    -diff -upkr linux-2.6.31/block/blk-map.c linux-2.6.31/block/blk-map.c
    ---- linux-2.6.31/block/blk-map.c	2009-09-09 18:13:59.000000000 -0400
    -+++ linux-2.6.31/block/blk-map.c	2011-05-17 21:05:32.669812993 -0400
    -@@ -5,6 +5,7 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.31/include/linux/blkdev.h linux-2.6.31/include/linux/blkdev.h
    ---- linux-2.6.31/include/linux/blkdev.h	2009-09-09 18:13:59.000000000 -0400
    -+++ linux-2.6.31/include/linux/blkdev.h	2009-09-23 06:17:33.000000000 -0400
    -@@ -699,6 +699,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -803,6 +805,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.31/include/linux/scatterlist.h linux-2.6.31/include/linux/scatterlist.h
    ---- linux-2.6.31/include/linux/scatterlist.h	2009-09-09 18:13:59.000000000 -0400
    -+++ linux-2.6.31/include/linux/scatterlist.h	2009-09-23 06:17:33.000000000 -0400
    -@@ -3,6 +3,7 @@
    - 
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.31/lib/scatterlist.c linux-2.6.31/lib/scatterlist.c
    ---- linux-2.6.31/lib/scatterlist.c	2009-09-09 18:13:59.000000000 -0400
    -+++ linux-2.6.31/lib/scatterlist.c	2009-09-23 06:17:33.000000000 -0400
    -@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
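The same patch is carried once per supported kernel version below; the control flow worth keeping in mind is in blk_rq_map_kern_sg() above: try __blk_rq_map_kern_sg() directly over the caller's SG list, and only when a segment fails the DMA-alignment or on-stack checks fall back to blk_rq_copy_kern_sg(), which bounces the data through freshly allocated pages (pre-copied for WRITE, copied back at completion for READ). A minimal sketch of that alignment test, assuming a 512-byte DMA mask; kseg, can_map_directly and dma_align_mask are invented names standing in for the scatterlist and queue_dma_alignment(q):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct kseg { size_t offset, length; };

  static bool can_map_directly(const struct kseg *segs, int nents,
                               size_t dma_align_mask)
  {
      for (int i = 0; i < nents; i++) {
          /* As in the kernel code: only the last segment may have an
           * unaligned length, so its length is excluded from the test. */
          size_t l = (i == nents - 1) ? 0 : segs[i].length;

          if ((segs[i].offset | l) & dma_align_mask)
              return false;   /* caller must fall back to a bounce copy */
      }
      return true;
  }

  int main(void)
  {
      struct kseg ok[2]  = { { 0, 512 }, { 0, 100 } }; /* odd tail is fine */
      struct kseg bad[2] = { { 3, 512 }, { 0, 512 } }; /* misaligned offset */

      printf("ok:  %d\n", can_map_directly(ok, 2, 511));
      printf("bad: %d\n", can_map_directly(bad, 2, 511));
      return 0;
  }

The "bad" list is what pushes blk_rq_map_kern_sg() onto the copy path: rather than failing the request, it trades a data copy for correctness.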
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.32.patch b/scst/kernel/scst_exec_req_fifo-2.6.32.patch
    deleted file mode 100644
    index bc0171019..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.32.patch
    +++ /dev/null
    @@ -1,529 +0,0 @@
    -diff -upkr linux-2.6.32/block/blk-map.c linux-2.6.32/block/blk-map.c
    ---- linux-2.6.32/block/blk-map.c	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/block/blk-map.c	2011-05-17 20:56:18.341812997 -0400
    -@@ -5,6 +5,7 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.32/include/linux/blkdev.h linux-2.6.32/include/linux/blkdev.h
    ---- linux-2.6.32/include/linux/blkdev.h	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/include/linux/blkdev.h	2009-12-16 07:21:35.000000000 -0500
    -@@ -708,6 +708,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -812,6 +814,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.32/include/linux/scatterlist.h linux-2.6.32/include/linux/scatterlist.h
    ---- linux-2.6.32/include/linux/scatterlist.h	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/include/linux/scatterlist.h	2009-12-16 07:21:35.000000000 -0500
    -@@ -3,6 +3,7 @@
    - 
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.32/lib/scatterlist.c linux-2.6.32/lib/scatterlist.c
    ---- linux-2.6.32/lib/scatterlist.c	2009-12-02 22:51:21.000000000 -0500
    -+++ linux-2.6.32/lib/scatterlist.c	2009-12-16 07:21:35.000000000 -0500
    -@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
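One detail worth noting before the next copy of the patch: blk_bio_map_kern_endio() above lets several bios share one blk_kern_sg_work, and only the completion that drops bios_inflight to zero performs the READ copy-back and frees the shared state. A hedged userspace sketch of that last-reference pattern using C11 atomics; struct work and complete_one are invented names:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct work {
      atomic_int inflight;    /* mirrors bios_inflight */
      bool is_read;
  };

  static void complete_one(struct work *w, int err)
  {
      /* atomic_fetch_sub returns the previous value, so 1 means this
       * caller dropped the last reference, i.e. atomic_dec_and_test. */
      if (atomic_fetch_sub(&w->inflight, 1) == 1) {
          if (w->is_read && err == 0)
              puts("last completion: copy bounce data back to caller");
          free(w);    /* mirrors blk_free_kern_sg_work() */
      }
  }

  int main(void)
  {
      struct work *w = malloc(sizeof(*w));

      if (!w)
          return 1;
      atomic_init(&w->inflight, 3);   /* three bios in flight */
      w->is_read = true;
      for (int i = 0; i < 3; i++)
          complete_one(w, 0);         /* only the third call frees w */
      return 0;
  }

The kernel version additionally disables interrupts around the copy-back because the KM_BIO_*_IRQ kmap slots must not be re-entered; the refcounting itself is the same.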
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.33.patch b/scst/kernel/scst_exec_req_fifo-2.6.33.patch
    deleted file mode 100644
    index fee571fce..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.33.patch
    +++ /dev/null
    @@ -1,529 +0,0 @@
    -diff -upkr linux-2.6.33/block/blk-map.c linux-2.6.33/block/blk-map.c
    ---- linux-2.6.33/block/blk-map.c	2010-02-24 13:52:17.000000000 -0500
    -+++ linux-2.6.33/block/blk-map.c	2011-05-17 21:09:00.317812998 -0400
    -@@ -5,6 +5,7 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.33/include/linux/blkdev.h linux-2.6.33/include/linux/blkdev.h
    ---- linux-2.6.33/include/linux/blkdev.h	2010-02-24 13:52:17.000000000 -0500
    -+++ linux-2.6.33/include/linux/blkdev.h	2010-03-01 07:41:59.000000000 -0500
    -@@ -710,6 +710,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -825,6 +827,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.33/include/linux/scatterlist.h linux-2.6.33/include/linux/scatterlist.h
    ---- linux-2.6.33/include/linux/scatterlist.h	2010-02-24 13:52:17.000000000 -0500
    -+++ linux-2.6.33/include/linux/scatterlist.h	2010-03-01 07:41:59.000000000 -0500
    -@@ -3,6 +3,7 @@
    - 
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.33/lib/scatterlist.c linux-2.6.33/lib/scatterlist.c
    ---- linux-2.6.33/lib/scatterlist.c	2010-02-24 13:52:17.000000000 -0500
    -+++ linux-2.6.33/lib/scatterlist.c	2010-03-01 07:41:59.000000000 -0500
    -@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
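The 2.6.34 copy that follows appears to differ only in its include hunk. For the bounce path itself, note how blk_rq_copy_kern_sg() sizes its table: PFN_UP(len) whole pages, with min_t(size_t, PAGE_SIZE, len) trimming the final element. A small standalone illustration, assuming PAGE_SIZE is 4096:

  #include <stddef.h>
  #include <stdio.h>

  #define PAGE_SIZE 4096u
  #define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

  int main(void)
  {
      size_t len = 10000;             /* total payload to bounce */
      size_t nents = PFN_UP(len);     /* pages needed: 3 here */

      printf("%zu pages for %zu bytes\n", nents, len);
      for (size_t i = 0; i < nents; i++) {
          size_t seg = len < PAGE_SIZE ? len : PAGE_SIZE;

          printf("page %zu: %zu bytes\n", i, seg);
          len -= seg;   /* the kernel loop subtracts PAGE_SIZE and lets the
                         * unused remainder wrap; the min() above is what
                         * keeps the last element's length correct */
      }
      return 0;
  }

For 10000 bytes this yields 4096 + 4096 + 1808, i.e. every bounce page but the last is filled completely.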
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.34.patch b/scst/kernel/scst_exec_req_fifo-2.6.34.patch
    deleted file mode 100644
    index c7021f573..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.34.patch
    +++ /dev/null
    @@ -1,530 +0,0 @@
    -diff -upkr linux-2.6.34/block/blk-map.c linux-2.6.34/block/blk-map.c
    ---- linux-2.6.34/block/blk-map.c	2010-05-16 17:17:36.000000000 -0400
    -+++ linux-2.6.34/block/blk-map.c	2011-05-17 21:10:43.745812995 -0400
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.34/include/linux/blkdev.h linux-2.6.34/include/linux/blkdev.h
    ---- linux-2.6.34/include/linux/blkdev.h	2010-05-16 17:17:36.000000000 -0400
    -+++ linux-2.6.34/include/linux/blkdev.h	2010-05-24 06:51:22.000000000 -0400
    -@@ -713,6 +713,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -828,6 +830,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.34/include/linux/scatterlist.h linux-2.6.34/include/linux/scatterlist.h
    ---- linux-2.6.34/include/linux/scatterlist.h	2010-05-16 17:17:36.000000000 -0400
    -+++ linux-2.6.34/include/linux/scatterlist.h	2010-05-24 06:51:22.000000000 -0400
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.34/lib/scatterlist.c linux-2.6.34/lib/scatterlist.c
    ---- linux-2.6.34/lib/scatterlist.c	2010-05-16 17:17:36.000000000 -0400
    -+++ linux-2.6.34/lib/scatterlist.c	2010-05-24 06:51:22.000000000 -0400
    -@@ -494,3 +494,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
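    For context: the API removed above was consumed by SCST's pass-through
    execution path. A minimal sketch of what a caller looked like on these
    2.6.x kernels follows; submit_sg_request, my_end_rq and the cdb/sgl
    parameters are illustrative assumptions, not names from this tree:

	#include <linux/blkdev.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void my_end_rq(struct request *rq, int error); /* completion handler, assumed */

	static int submit_sg_request(struct request_queue *q,
				     struct scatterlist *sgl, int nents,
				     unsigned char *cdb, unsigned short cdb_len,
				     int write)
	{
		struct request *rq;
		int res;

		rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
		if (rq == NULL)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		memcpy(rq->cmd, cdb, cdb_len);
		rq->cmd_len = cdb_len;

		/* Maps sgl directly, or transparently bounces it on failure */
		res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
		if (res != 0) {
			blk_put_request(rq);
			return res;
		}

		blk_execute_rq_nowait(q, NULL, rq, 0 /* !at_head */, my_end_rq);
		return 0;
	}

    On success the bios unmap themselves through their end_io handlers;
    blk_rq_unmap_kern_sg() is only for error paths, as its kernel-doc states.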
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.35.patch b/scst/kernel/scst_exec_req_fifo-2.6.35.patch
    deleted file mode 100644
    index b10ae1b5a..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.35.patch
    +++ /dev/null
    @@ -1,530 +0,0 @@
    -diff -upkr linux-2.6.35/block/blk-map.c linux-2.6.35/block/blk-map.c
    ---- linux-2.6.35/block/blk-map.c	2010-08-01 18:11:14.000000000 -0400
    -+++ linux-2.6.35/block/blk-map.c	2011-05-17 21:12:23.125813000 -0400
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= 1 << BIO_RW;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.35/include/linux/blkdev.h linux-2.6.35/include/linux/blkdev.h
    ---- linux-2.6.35/include/linux/blkdev.h	2010-08-01 18:11:14.000000000 -0400
    -+++ linux-2.6.35/include/linux/blkdev.h	2010-08-04 04:21:59.737128732 -0400
    -@@ -717,6 +717,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -832,6 +834,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.35/include/linux/scatterlist.h linux-2.6.35/include/linux/scatterlist.h
    ---- linux-2.6.35/include/linux/scatterlist.h	2010-08-01 18:11:14.000000000 -0400
    -+++ linux-2.6.35/include/linux/scatterlist.h	2010-08-04 04:21:59.741129485 -0400
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.35/lib/scatterlist.c linux-2.6.35/lib/scatterlist.c
    ---- linux-2.6.35/lib/scatterlist.c	2010-08-01 18:11:14.000000000 -0400
    -+++ linux-2.6.35/lib/scatterlist.c	2010-08-04 04:21:59.741129485 -0400
    -@@ -494,3 +494,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
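    For reference, the sg_copy() helper exported by these patches is called as
    in the following sketch; bounce_copy and both scatterlists are
    hypothetical, and the KM_USER* slots come from the 2.6.x kmap_atomic API
    that the function's signature exposes:

	#include <linux/scatterlist.h>
	#include <linux/highmem.h>	/* KM_USER0/KM_USER1 on 2.6.x */

	/* Copy all of src_sg into dst_sg from process context;
	 * returns the number of bytes copied. */
	static int bounce_copy(struct scatterlist *dst_sg,
			       struct scatterlist *src_sg)
	{
		/* 0 for nents_to_copy and copy_len means "copy everything" */
		return sg_copy(dst_sg, src_sg, 0, 0, KM_USER0, KM_USER1);
	}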
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.36.patch b/scst/kernel/scst_exec_req_fifo-2.6.36.patch
    deleted file mode 100644
    index d90bdcb8e..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.36.patch
    +++ /dev/null
    @@ -1,532 +0,0 @@
    -diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
    ---- linux-2.6.36/block/blk-map.c	2010-10-20 16:30:22.000000000 -0400
    -+++ linux-2.6.36/block/blk-map.c	2011-05-17 21:13:42.301812997 -0400
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -271,6 +273,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.36/include/linux/blkdev.h linux-2.6.36/include/linux/blkdev.h
    ---- linux-2.6.36/include/linux/blkdev.h	2010-10-20 16:30:22.000000000 -0400
    -+++ linux-2.6.36/include/linux/blkdev.h	2010-10-26 04:00:15.899759399 -0400
    -@@ -629,6 +629,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.36/include/linux/scatterlist.h linux-2.6.36/include/linux/scatterlist.h
    ---- linux-2.6.36/include/linux/scatterlist.h	2010-10-20 16:30:22.000000000 -0400
    -+++ linux-2.6.36/include/linux/scatterlist.h	2010-10-26 04:00:15.899759399 -0400
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.36/lib/scatterlist.c linux-2.6.36/lib/scatterlist.c
    ---- linux-2.6.36/lib/scatterlist.c	2010-10-20 16:30:22.000000000 -0400
    -+++ linux-2.6.36/lib/scatterlist.c	2010-10-26 04:00:15.899759399 -0400
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
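    A quick sanity check of the max_nr_vecs bound used throughout these
    patches: with 4 KiB pages, and taking roughly 100 bytes for struct bio and
    16 bytes for struct bio_vec on a 64-bit 2.6.x build (approximate figures,
    not measured from this tree), (4096 - 100) / 16 gives about 249 vectors,
    slightly below BIO_MAX_PAGES (256), so each bio_kmalloc() allocation does
    stay within a single page, as the comment above it claims.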
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.37.patch b/scst/kernel/scst_exec_req_fifo-2.6.37.patch
    deleted file mode 100644
    index ab94c18ae..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.37.patch
    +++ /dev/null
    @@ -1,532 +0,0 @@
    -diff -upkr linux-2.6.37/block/blk-map.c linux-2.6.37/block/blk-map.c
    ---- linux-2.6.37/block/blk-map.c	2011-01-04 19:50:19.000000000 -0500
    -+++ linux-2.6.37/block/blk-map.c	2011-05-17 21:15:14.329812999 -0400
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.37/include/linux/blkdev.h linux-2.6.37/include/linux/blkdev.h
    ---- linux-2.6.37/include/linux/blkdev.h	2011-01-04 19:50:19.000000000 -0500
    -+++ linux-2.6.37/include/linux/blkdev.h	2011-01-08 08:45:54.350430208 -0500
    -@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.37/include/linux/scatterlist.h linux-2.6.37/include/linux/scatterlist.h
    ---- linux-2.6.37/include/linux/scatterlist.h	2011-01-04 19:50:19.000000000 -0500
    -+++ linux-2.6.37/include/linux/scatterlist.h	2011-01-08 08:45:54.354431761 -0500
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.37/lib/scatterlist.c linux-2.6.37/lib/scatterlist.c
    ---- linux-2.6.37/lib/scatterlist.c	2011-01-04 19:50:19.000000000 -0500
    -+++ linux-2.6.37/lib/scatterlist.c	2011-01-08 08:45:54.401930472 -0500
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.38.patch b/scst/kernel/scst_exec_req_fifo-2.6.38.patch
    deleted file mode 100644
    index 4561ba2e9..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.38.patch
    +++ /dev/null
    @@ -1,532 +0,0 @@
    -diff -upkr linux-2.6.38/block/blk-map.c linux-2.6.38/block/blk-map.c
    ---- linux-2.6.38/block/blk-map.c	2011-03-14 21:20:32.000000000 -0400
    -+++ linux-2.6.38/block/blk-map.c	2011-05-11 22:07:37.589813000 -0400
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.38/include/linux/blkdev.h linux-2.6.38/include/linux/blkdev.h
    ---- linux-2.6.38/include/linux/blkdev.h	2011-03-14 21:20:32.000000000 -0400
    -+++ linux-2.6.38/include/linux/blkdev.h	2011-03-18 10:19:00.000000000 -0400
    -@@ -593,6 +593,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.38/include/linux/scatterlist.h linux-2.6.38/include/linux/scatterlist.h
    ---- linux-2.6.38/include/linux/scatterlist.h	2011-03-14 21:20:32.000000000 -0400
    -+++ linux-2.6.38/include/linux/scatterlist.h	2011-03-18 10:19:00.000000000 -0400
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.38/lib/scatterlist.c linux-2.6.38/lib/scatterlist.c
    ---- linux-2.6.38/lib/scatterlist.c	2011-03-14 21:20:32.000000000 -0400
    -+++ linux-2.6.38/lib/scatterlist.c	2011-03-18 10:46:41.000000000 -0400
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    diff --git a/scst/kernel/scst_exec_req_fifo-2.6.39.patch b/scst/kernel/scst_exec_req_fifo-2.6.39.patch
    deleted file mode 100644
    index 7ecca2958..000000000
    --- a/scst/kernel/scst_exec_req_fifo-2.6.39.patch
    +++ /dev/null
    @@ -1,532 +0,0 @@
    -diff -upkr linux-2.6.39/block/blk-map.c linux-2.6.39/block/blk-map.c
    ---- linux-2.6.39/block/blk-map.c	2011-05-19 00:06:34.000000000 -0400
    -+++ linux-2.6.39/block/blk-map.c	2011-05-19 10:49:02.753812997 -0400
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-2.6.39/include/linux/blkdev.h linux-2.6.39/include/linux/blkdev.h
    ---- linux-2.6.39/include/linux/blkdev.h	2011-05-19 00:06:34.000000000 -0400
    -+++ linux-2.6.39/include/linux/blkdev.h	2011-05-19 10:49:02.753812997 -0400
    -@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -707,6 +709,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-2.6.39/include/linux/scatterlist.h linux-2.6.39/include/linux/scatterlist.h
    ---- linux-2.6.39/include/linux/scatterlist.h	2011-05-19 00:06:34.000000000 -0400
    -+++ linux-2.6.39/include/linux/scatterlist.h	2011-05-19 10:49:02.753812997 -0400
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-2.6.39/lib/scatterlist.c linux-2.6.39/lib/scatterlist.c
    ---- linux-2.6.39/lib/scatterlist.c	2011-05-19 00:06:34.000000000 -0400
    -+++ linux-2.6.39/lib/scatterlist.c	2011-05-19 10:49:02.753812997 -0400
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    diff --git a/scst/kernel/scst_exec_req_fifo-3.0.patch b/scst/kernel/scst_exec_req_fifo-3.0.patch
    deleted file mode 100644
    index 998f4a32c..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.0.patch
    +++ /dev/null
    @@ -1,532 +0,0 @@
    -diff -upkr linux-3.0.0-orig/block/blk-map.c linux-3.0.0-scst-dbg/block/blk-map.c
    ---- linux-3.0.0-orig/block/blk-map.c	2011-07-21 22:17:23.000000000 -0400
    -+++ linux-3.0.0-scst-dbg/block/blk-map.c	2011-07-22 19:40:27.131230804 -0400
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff -upkr linux-3.0.0-orig/include/linux/blkdev.h linux-3.0.0-scst-dbg/include/linux/blkdev.h
    ---- linux-3.0.0-orig/include/linux/blkdev.h	2011-07-21 22:17:23.000000000 -0400
    -+++ linux-3.0.0-scst-dbg/include/linux/blkdev.h	2011-07-22 19:24:27.803231156 -0400
    -@@ -594,6 +594,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff -upkr linux-3.0.0-orig/include/linux/scatterlist.h linux-3.0.0-scst-dbg/include/linux/scatterlist.h
    ---- linux-3.0.0-orig/include/linux/scatterlist.h	2011-07-21 22:17:23.000000000 -0400
    -+++ linux-3.0.0-scst-dbg/include/linux/scatterlist.h	2011-07-22 19:24:27.803231156 -0400
    -@@ -3,6 +3,7 @@
    - 
    - #include <asm/types.h>
    - #include <asm/scatterlist.h>
    -+#include <asm/kmap_types.h>
    - #include <linux/mm.h>
    - #include <linux/string.h>
    - #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff -upkr linux-3.0.0-orig/lib/scatterlist.c linux-3.0.0-scst-dbg/lib/scatterlist.c
    ---- linux-3.0.0-orig/lib/scatterlist.c	2011-07-21 22:17:23.000000000 -0400
    -+++ linux-3.0.0-scst-dbg/lib/scatterlist.c	2011-07-22 19:40:27.131230804 -0400
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    diff --git a/scst/kernel/scst_exec_req_fifo-3.1.patch b/scst/kernel/scst_exec_req_fifo-3.1.patch
    deleted file mode 100644
    index d6bb5346b..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.1.patch
    +++ /dev/null
    @@ -1,536 +0,0 @@
    -=== modified file 'linux-3.1-scst/block/blk-map.c'
    ---- linux-3.1-orig/block/blk-map.c	2011-10-26 20:34:50 +0000
    -+++ linux-3.1-scst/block/blk-map.c	2011-10-26 20:58:56 +0000
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <linux/uio.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'linux-3.1-scst/include/linux/blkdev.h'
    ---- linux-3.1-orig/include/linux/blkdev.h	2011-10-26 20:34:50 +0000
    -+++ linux-3.1-scst/include/linux/blkdev.h	2011-10-26 20:58:56 +0000
    -@@ -599,6 +599,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -714,6 +716,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'linux-3.1-scst/include/linux/scatterlist.h'
    ---- linux-3.1-orig/include/linux/scatterlist.h	2011-10-26 20:34:50 +0000
    -+++ linux-3.1-scst/include/linux/scatterlist.h	2011-10-26 20:58:56 +0000
    -@@ -3,6 +3,7 @@
    - 
    - #include 
    - #include 
    -+#include 
    - #include 
    - #include 
    - #include 
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'linux-3.1-scst/lib/scatterlist.c'
    ---- linux-3.1-orig/lib/scatterlist.c	2011-10-26 20:34:50 +0000
    -+++ linux-3.1-scst/lib/scatterlist.c	2011-10-26 20:58:56 +0000
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
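
The sg_copy() routine carried by each of these deleted patches walks the source and destination scatterlists independently: on every pass it copies the largest chunk that fits in both current elements, then advances whichever side has been exhausted. Below is a minimal userspace model of that loop, with an illustrative struct seg standing in for struct scatterlist (the names and layout are assumptions for the sketch, not the kernel API):

	#include <stddef.h>
	#include <string.h>

	/* Illustrative stand-in for struct scatterlist. */
	struct seg {
		char *buf;
		size_t len;
		struct seg *next;
	};

	/*
	 * Model of the sg_copy() walk: both lists advance independently,
	 * and the copy stops when either list or copy_len runs out.
	 * Returns the number of bytes copied.
	 */
	static size_t seg_copy(struct seg *dst, struct seg *src, size_t copy_len)
	{
		size_t res = 0, d_off = 0, s_off = 0;

		while (dst != NULL && src != NULL && copy_len > 0) {
			/* Largest chunk both current elements can hold. */
			size_t n = dst->len - d_off;

			if (src->len - s_off < n)
				n = src->len - s_off;
			if (copy_len < n)
				n = copy_len;

			memcpy(dst->buf + d_off, src->buf + s_off, n);
			res += n;
			copy_len -= n;
			d_off += n;
			s_off += n;

			if (d_off == dst->len) {	/* dst element consumed */
				dst = dst->next;
				d_off = 0;
			}
			if (s_off == src->len) {	/* src element consumed */
				src = src->next;
				s_off = 0;
			}
		}
		return res;
	}

The kernel version additionally works page by page under kmap_atomic(), because scatterlist elements reference struct page rather than mapped buffers, and it takes the copy_page() fast path when both sides are page-aligned; the model above elides that.
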
    diff --git a/scst/kernel/scst_exec_req_fifo-3.10.patch b/scst/kernel/scst_exec_req_fifo-3.10.patch
    deleted file mode 100644
    index 69fce3a5f..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.10.patch
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2013-07-23 02:45:53 +0000
    -+++ new/block/blk-map.c	2013-07-23 02:50:11 +0000
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2013-07-23 02:45:53 +0000
    -+++ new/include/linux/blkdev.h	2013-07-23 02:50:11 +0000
    -@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2013-07-23 02:45:53 +0000
    -+++ new/include/linux/scatterlist.h	2013-07-23 02:50:11 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -244,6 +245,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2013-07-23 02:45:53 +0000
    -+++ new/lib/scatterlist.c	2013-07-23 02:50:11 +0000
    -@@ -627,3 +627,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
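
For reference, a pass-through target such as SCST would have driven the API exported by these patches roughly as follows. This is a hypothetical caller written against the 3.x-era block layer that the patch targets (blk_get_request(), REQ_TYPE_BLOCK_PC, blk_execute_rq()); the function name and CDB handling are illustrative only, and error handling is abbreviated:

	#include <linux/blkdev.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Hypothetical caller, not part of the patch itself. */
	static int submit_sg_cmd(struct request_queue *q, struct scatterlist *sgl,
				 int nents, const unsigned char *cdb, int cdb_len)
	{
		struct request *rq;
		int res;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (rq == NULL)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		memcpy(rq->cmd, cdb, cdb_len);
		rq->cmd_len = cdb_len;

		/* Maps sgl directly, or transparently via a bounce copy. */
		res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
		if (res != 0)
			goto out_put;

		res = blk_execute_rq(q, NULL, rq, 0);	/* waits for completion */
		if (res != 0)
			blk_rq_unmap_kern_sg(rq, res);	/* only on error, per the kernel-doc */

	out_put:
		blk_put_request(rq);
		return res;
	}

On success the mapped bios complete normally and the endio handler tears everything down, which is why blk_rq_unmap_kern_sg() is documented as an error-path-only call.
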
    diff --git a/scst/kernel/scst_exec_req_fifo-3.11.patch b/scst/kernel/scst_exec_req_fifo-3.11.patch
    deleted file mode 100644
    index 63b2da453..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.11.patch
    +++ /dev/null
    @@ -1,528 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2013-09-28 00:14:38 +0000
    -+++ new/block/blk-map.c	2013-09-28 00:23:26 +0000
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2013-09-28 00:14:38 +0000
    -+++ new/include/linux/blkdev.h	2013-09-28 00:23:26 +0000
    -@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2013-09-28 00:14:38 +0000
    -+++ new/include/linux/scatterlist.h	2013-09-28 00:23:26 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2013-09-28 00:14:38 +0000
    -+++ new/lib/scatterlist.c	2013-09-28 00:23:26 +0000
    -@@ -716,3 +716,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
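
When direct mapping fails, the blk_rq_copy_kern_sg() in these patches builds a bounce scatterlist of freshly allocated pages sized to cover the payload: PFN_UP(len) entries, each PAGE_SIZE long except the final one, which is trimmed to the remainder. A small userspace model of that sizing loop follows (PAGE_SZ, struct bounce, and alloc_bounce are illustrative stand-ins, not kernel names):

	#include <stdlib.h>

	#define PAGE_SZ 4096u

	struct bounce {
		char  *buf;
		size_t len;
	};

	/* Returns an array of PFN_UP(len) segments covering len bytes, or NULL. */
	static struct bounce *alloc_bounce(size_t len, size_t *nents)
	{
		size_t n = (len + PAGE_SZ - 1) / PAGE_SZ;	/* PFN_UP(len) */
		struct bounce *b = calloc(n, sizeof(*b));
		size_t i;

		if (b == NULL)
			return NULL;
		for (i = 0; i < n; i++) {
			b[i].buf = malloc(PAGE_SZ);
			if (b[i].buf == NULL)
				goto err;
			/* Full page, except the trimmed final segment. */
			b[i].len = len < PAGE_SZ ? len : PAGE_SZ;
			len -= b[i].len;
		}
		*nents = n;
		return b;
	err:
		while (i--)
			free(b[i].buf);
		free(b);
		return NULL;
	}

One design note: the kernel loop reuses len as a countdown and lets it wrap below zero on the final element, which is harmless there only because len is never read again; the model keeps the arithmetic unsigned-safe instead.
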
    diff --git a/scst/kernel/scst_exec_req_fifo-3.12.patch b/scst/kernel/scst_exec_req_fifo-3.12.patch
    deleted file mode 100644
    index b08d43f3e..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.12.patch
    +++ /dev/null
    @@ -1,528 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2013-11-30 00:34:22 +0000
    -+++ new/block/blk-map.c	2013-11-30 00:39:53 +0000
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2013-11-30 00:34:22 +0000
    -+++ new/include/linux/blkdev.h	2013-11-30 00:39:53 +0000
    -@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2013-11-30 00:34:22 +0000
    -+++ new/include/linux/scatterlist.h	2013-11-30 00:39:53 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2013-11-30 00:34:22 +0000
    -+++ new/lib/scatterlist.c	2013-11-30 00:39:53 +0000
    -@@ -717,3 +717,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
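
Every version of blk_bio_map_kern_endio() in these patches uses the same teardown pattern: each bio holds one reference on the shared blk_kern_sg_work, and the completion that drops bios_inflight to zero performs the READ copy-back and frees the work item. Here is a compact C11 model of that pattern; the kernel uses atomic_dec_and_test(), and the names below are illustrative:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct work {
		atomic_int inflight;
		/* ... source and bounce buffers would live here ... */
	};

	/* Called once per completed bio; returns true for the final completion. */
	static bool complete_one(struct work *w, bool read_ok)
	{
		/* fetch_sub returns the previous value: 1 means we hit zero. */
		if (atomic_fetch_sub(&w->inflight, 1) == 1) {
			if (read_ok) {
				/* copy bounce data back to the caller's buffers */
			}
			free(w);
			return true;
		}
		return false;
	}

The count is set with atomic_set() only after all bios have been built, which is safe because none of them have been submitted yet at that point; once submission starts, only the completions touch the counter.
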
    diff --git a/scst/kernel/scst_exec_req_fifo-3.13.patch b/scst/kernel/scst_exec_req_fifo-3.13.patch
    deleted file mode 100644
    index 84980e46a..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.13.patch
    +++ /dev/null
    @@ -1,528 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2014-01-30 00:25:53 +0000
    -+++ new/block/blk-map.c	2014-01-30 00:44:50 +0000
    -@@ -5,6 +5,8 @@
    - #include <linux/module.h>
    - #include <linux/bio.h>
    - #include <linux/blkdev.h>
    -+#include <linux/scatterlist.h>
    -+#include <linux/slab.h>
    - #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2014-01-30 00:25:53 +0000
    -+++ new/include/linux/blkdev.h	2014-01-30 00:44:50 +0000
    -@@ -712,6 +712,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -831,6 +833,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2014-01-30 00:25:53 +0000
    -+++ new/include/linux/scatterlist.h	2014-01-30 00:44:50 +0000
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2014-01-30 00:25:53 +0000
    -+++ new/lib/scatterlist.c	2014-01-30 00:44:50 +0000
    -@@ -717,3 +717,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
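
A note on the structure of the blk-map.c hunk removed above: blk_rq_map_kern_sg() first tries to map the caller's scatterlist directly and falls back to a page-aligned bounce copy only when direct mapping fails (misalignment, stack pages, padding). A hedged userspace sketch of that try-direct-then-bounce shape follows; the 8-byte rule stands in for queue_dma_alignment(), and map_direct()/map_or_bounce() are illustrative names.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DMA_ALIGN 8	/* pretend the hardware wants 8-byte alignment */

static int map_direct(const void *buf)
{
	return ((uintptr_t)buf % DMA_ALIGN) ? -EINVAL : 0;
}

/*
 * On success *bounce is NULL (mapped in place) or an aligned copy that the
 * caller must copy back from (READ) and free once the I/O completes.
 */
static int map_or_bounce(void *buf, size_t len, void **bounce)
{
	*bounce = NULL;
	if (map_direct(buf) == 0)
		return 0;

	void *copy = aligned_alloc(DMA_ALIGN,
			(len + DMA_ALIGN - 1) / DMA_ALIGN * DMA_ALIGN);

	if (copy == NULL)
		return -ENOMEM;
	memcpy(copy, buf, len);		/* WRITE data must travel with the copy */
	*bounce = copy;
	return map_direct(copy);	/* always 0 here: copy is aligned */
}

int main(void)
{
	char raw[32];
	char *payload = raw + 1;	/* deliberately misaligned */
	void *bounce;

	strcpy(payload, "payload");
	if (map_or_bounce(payload, 8, &bounce) == 0)
		printf("mapped %s\n", bounce ? "via bounce buffer" : "directly");
	free(bounce);			/* free(NULL) is a no-op */
	return 0;
}
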
    diff --git a/scst/kernel/scst_exec_req_fifo-3.14.patch b/scst/kernel/scst_exec_req_fifo-3.14.patch
    deleted file mode 100644
    index 70c47797d..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.14.patch
    +++ /dev/null
    @@ -1,528 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2014-04-17 22:02:06 +0000
    -+++ new/block/blk-map.c	2014-04-17 22:08:48 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include 
-+#include 
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2014-04-17 22:02:06 +0000
    -+++ new/include/linux/blkdev.h	2014-04-17 22:08:48 +0000
    -@@ -705,6 +705,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -825,6 +827,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2014-04-17 22:02:06 +0000
    -+++ new/include/linux/scatterlist.h	2014-04-17 22:08:48 +0000
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2014-04-17 22:02:06 +0000
    -+++ new/lib/scatterlist.c	2014-04-17 22:08:48 +0000
    -@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
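
One more note on the deleted code: blk_bio_map_kern_endio() releases the shared bounce state with atomic_dec_and_test(), so only the last completing bio copies READ data back and frees it. The same last-reference pattern, sketched with C11 atomics standing in for the kernel's atomic_t (struct work and endio() are illustrative names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	atomic_int inflight;	/* bios still outstanding */
};

static void endio(struct work *w, int id)
{
	/* atomic_fetch_sub() returns the old value; 1 means we were last */
	if (atomic_fetch_sub(&w->inflight, 1) == 1) {
		printf("bio %d was last: copy bounce data back, free state\n", id);
		free(w);
	} else {
		printf("bio %d done, others still in flight\n", id);
	}
}

int main(void)
{
	struct work *w = malloc(sizeof(*w));

	if (w == NULL)
		return 1;
	atomic_init(&w->inflight, 3);	/* three bios submitted */
	endio(w, 1);
	endio(w, 2);
	endio(w, 3);	/* only this call frees w */
	return 0;
}
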
    diff --git a/scst/kernel/scst_exec_req_fifo-3.15.patch b/scst/kernel/scst_exec_req_fifo-3.15.patch
    deleted file mode 100644
    index 665cc2606..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.15.patch
    +++ /dev/null
    @@ -1,528 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2014-06-18 01:32:48 +0000
    -+++ new/block/blk-map.c	2014-06-18 01:40:34 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include 
-+#include 
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2014-06-18 01:32:48 +0000
    -+++ new/include/linux/blkdev.h	2014-06-18 01:40:34 +0000
    -@@ -717,6 +717,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -837,6 +839,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, const struct sg_iovec *,
    - 			       int, unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2014-06-18 01:32:48 +0000
    -+++ new/include/linux/scatterlist.h	2014-06-18 01:40:34 +0000
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2014-06-18 01:32:48 +0000
    -+++ new/lib/scatterlist.c	2014-06-18 01:40:34 +0000
    -@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
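
Finally, the mapping loop in the __blk_rq_map_kern_sg() hunks above feeds each SG element to bio_add_pc_page() in pieces that never cross a page boundary: bytes = min(len, PAGE_SIZE - offset), after which the offset resets to zero and the page pointer advances via nth_page(page, 1). A standalone sketch of that splitting rule, with an illustrative 4 KiB PAGE_SIZE and map_segment() as a made-up name:

#include <stdio.h>

#define PAGE_SIZE 4096u

static void map_segment(size_t page, size_t offset, size_t len)
{
	while (len > 0) {
		size_t bytes = PAGE_SIZE - offset;	/* room left in page */

		if (bytes > len)
			bytes = len;
		printf("add page %zu, offset %zu, %zu bytes\n",
		       page, offset, bytes);
		len -= bytes;
		offset = 0;	/* later pieces start on a page boundary */
		page++;		/* the kernel code advances with nth_page() */
	}
}

int main(void)
{
	/* 10000 bytes starting 300 bytes into page 7: 3796 + 4096 + 2108 */
	map_segment(7, 300, 10000);
	return 0;
}
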
    diff --git a/scst/kernel/scst_exec_req_fifo-3.16.patch b/scst/kernel/scst_exec_req_fifo-3.16.patch
    deleted file mode 100644
    index a08921920..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.16.patch
    +++ /dev/null
    @@ -1,524 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2014-08-19 01:00:36 +0000
    -+++ new/block/blk-map.c	2014-08-19 01:37:01 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include 
-+#include 
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2014-08-19 01:00:36 +0000
    -+++ new/include/linux/blkdev.h	2014-08-19 01:06:48 +0000
    -@@ -735,6 +735,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -856,6 +858,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, const struct sg_iovec *,
    - 			       int, unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2014-08-19 01:00:36 +0000
    -+++ new/include/linux/scatterlist.h	2014-08-19 01:06:48 +0000
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2014-08-19 01:00:36 +0000
    -+++ new/lib/scatterlist.c	2014-08-19 01:06:48 +0000
    -@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
-+ * May advance to the next dst_sg element. Hence, to copy into strictly
-+ * one dst_sg element, that element must either be the last in the
-+ * chain, or copy_len must equal dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
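
All four per-kernel variants of this deleted patch export the same pair of
entry points. For orientation, here is a minimal caller sketch showing how
blk_rq_map_kern_sg() and blk_rq_unmap_kern_sg() were meant to be driven on
these 3.x kernels. It is illustrative only: submit_sg_scsi_cmd() is an
invented name, not code from the SCST tree, and the NULL-versus-ERR_PTR
return convention of blk_get_request() varied across these kernel versions.

    /*
     * Illustrative caller sketch (not from the SCST tree): map a kernel
     * scatterlist onto a SCSI pass-through request and execute it.
     */
    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static int submit_sg_scsi_cmd(struct request_queue *q,
                                  const unsigned char *cdb, int cdb_len,
                                  struct scatterlist *sgl, int nents,
                                  int write)
    {
            struct request *rq;
            int res;

            rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
            if (IS_ERR_OR_NULL(rq))   /* NULL vs. ERR_PTR varies by kernel */
                    return -ENOMEM;

            rq->cmd_type = REQ_TYPE_BLOCK_PC;
            memcpy(rq->cmd, cdb, cdb_len);
            rq->cmd_len = cdb_len;
            rq->timeout = 60 * HZ;

            res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
            if (res != 0)
                    goto out_put;

            /*
             * If further setup failed here, after mapping but before
             * execution, the mapping would have to be undone explicitly:
             *         blk_rq_unmap_kern_sg(rq, res);
             * On completion the bios are ended by the block layer itself,
             * so the success path needs no unmap.
             */
            res = blk_execute_rq(q, NULL, rq, 0);

    out_put:
            blk_put_request(rq);
            return res;
    }
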
    diff --git a/scst/kernel/scst_exec_req_fifo-3.17.patch b/scst/kernel/scst_exec_req_fifo-3.17.patch
    deleted file mode 100644
    index 46215abb0..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.17.patch
    +++ /dev/null
    @@ -1,524 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2014-11-21 03:17:49 +0000
    -+++ new/block/blk-map.c	2014-11-21 03:43:00 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
-+		/* Decrement the count of in-flight bios and, once zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
-+		 * We must limit the amount of copied data to to_copy, because
-+		 * the last element of sgl might not be marked as the end of
-+		 * the SG chain.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2014-11-21 03:17:49 +0000
    -+++ new/include/linux/blkdev.h	2014-11-21 03:43:00 +0000
    -@@ -737,6 +737,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -858,6 +860,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, const struct sg_iovec *,
    - 			       int, unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2014-11-21 03:17:49 +0000
    -+++ new/include/linux/scatterlist.h	2014-11-21 03:43:00 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2014-11-21 03:17:49 +0000
    -+++ new/lib/scatterlist.c	2014-11-21 03:43:00 +0000
    -@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatter
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
-+ * May advance to the next dst_sg element. Hence, to copy into strictly
-+ * one dst_sg element, that element must either be the last in the
-+ * chain, or copy_len must equal dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
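
The lib/scatterlist.c half of the patch is likewise identical across the
newer variants. Below is a self-contained sketch of how the deleted
sg_copy() helper would be called in its four-argument form; the demo
function, page bookkeeping, and fill values are invented for illustration.

    /*
     * Illustrative only: exercise the deleted sg_copy() helper by
     * duplicating a two-page scatterlist. All names here are invented.
     */
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static int sg_copy_demo(void)
    {
            struct scatterlist src[2], dst[2];
            struct page *p[4];
            int i, copied, res = -ENOMEM;

            for (i = 0; i < 4; i++) {
                    p[i] = alloc_page(GFP_KERNEL);
                    if (p[i] == NULL)
                            goto out_free;
            }

            sg_init_table(src, 2);
            sg_set_page(&src[0], p[0], PAGE_SIZE, 0);
            sg_set_page(&src[1], p[1], PAGE_SIZE, 0);
            memset(page_address(p[0]), 0xab, PAGE_SIZE);
            memset(page_address(p[1]), 0xcd, PAGE_SIZE);

            sg_init_table(dst, 2);
            sg_set_page(&dst[0], p[2], PAGE_SIZE, 0);
            sg_set_page(&dst[1], p[3], PAGE_SIZE, 0);

            /* 0 for both limits means "copy everything" (see kernel-doc). */
            copied = sg_copy(dst, src, 0, 0);
            res = (copied == 2 * PAGE_SIZE) ? 0 : -EIO;

    out_free:
            while (--i >= 0)
                    __free_page(p[i]);
            return res;
    }
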
    diff --git a/scst/kernel/scst_exec_req_fifo-3.18.patch b/scst/kernel/scst_exec_req_fifo-3.18.patch
    deleted file mode 100644
    index e64a14fde..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.18.patch
    +++ /dev/null
    @@ -1,536 +0,0 @@
    -Subject: [PATCH] scst_exec_req_fifo
    -
    ----
    - block/blk-map.c             | 329 ++++++++++++++++++++++++++++++++++++++++++++
    - include/linux/blkdev.h      |   5 +
    - include/linux/scatterlist.h |   4 +
    - lib/scatterlist.c           | 124 +++++++++++++++++
    - 4 files changed, 462 insertions(+)
    -
    -diff --git a/block/blk-map.c b/block/blk-map.c
    -index f890d43..d4b8509 100644
    ---- a/block/blk-map.c
    -+++ b/block/blk-map.c
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
-+		/* Decrement the count of in-flight bios and, once zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
-+		 * We must limit the amount of copied data to to_copy, because
-+		 * the last element of sgl might not be marked as the end of
-+		 * the SG chain.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
    -index aac0f9e..5cd3afa 100644
    ---- a/include/linux/blkdev.h
    -+++ b/include/linux/blkdev.h
    -@@ -731,6 +731,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -852,6 +854,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, const struct sg_iovec *,
    - 			       int, unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
    -index ed8f9e7..f64e02f 100644
    ---- a/include/linux/scatterlist.h
    -+++ b/include/linux/scatterlist.h
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    - size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			  void *buf, size_t buflen, off_t skip);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff --git a/lib/scatterlist.c b/lib/scatterlist.c
    -index c9f2e8c..ba693d1 100644
    ---- a/lib/scatterlist.c
    -+++ b/lib/scatterlist.c
    -@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
    - }
    - EXPORT_SYMBOL(sg_pcopy_to_buffer);
    -+
    -+
    -+/*
-+ * May advance to the next dst_sg element. Hence, to copy into strictly
-+ * one dst_sg element, that element must either be the last in the
-+ * chain, or copy_len must equal dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    --- 
    -2.1.2
    -
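
The 3.2 and 3.3 variants below differ from the newer ones in a single
respect: kmap_atomic() on those kernels still took an explicit kmap-slot
argument (enum km_type), which was dropped upstream around 3.4, so
sg_copy() and sg_copy_elem() thread a pair of slot types through. A
version-guarded sketch of the two calling conventions; fill_page() is an
invented example, not code from either patch:

    #include <linux/highmem.h>
    #include <linux/string.h>
    #include <linux/version.h>

    /* Invented example: fill one page under either kmap_atomic() flavour. */
    static void fill_page(struct page *pg, int c)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
            /* 3.2/3.3-era API: the caller names the atomic kmap slot. */
            void *p = kmap_atomic(pg, KM_USER0);

            memset(p, c, PAGE_SIZE);
            kunmap_atomic(p, KM_USER0);
    #else
            /* Later kernels manage the slot implicitly. */
            void *p = kmap_atomic(pg);

            memset(p, c, PAGE_SIZE);
            kunmap_atomic(p);
    #endif
    }
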
    diff --git a/scst/kernel/scst_exec_req_fifo-3.2.patch b/scst/kernel/scst_exec_req_fifo-3.2.patch
    deleted file mode 100644
    index 2b8257ce3..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.2.patch
    +++ /dev/null
    @@ -1,536 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2012-01-10 22:58:17 +0000
    -+++ new/block/blk-map.c	2012-01-10 23:01:21 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
-+		/* Decrement the count of in-flight bios and, once zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
-+		 * We must limit the amount of copied data to to_copy, because
-+		 * the last element of sgl might not be marked as the end of
-+		 * the SG chain.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2012-01-10 22:58:17 +0000
    -+++ new/include/linux/blkdev.h	2012-01-10 23:01:21 +0000
    -@@ -599,6 +599,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -716,6 +718,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2012-01-10 22:58:17 +0000
    -+++ new/include/linux/scatterlist.h	2012-01-10 23:01:21 +0000
    -@@ -3,6 +3,7 @@
    - 
    - #include 
    - #include 
    -+#include 
    - #include 
    - #include 
    - #include 
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2012-01-10 22:58:17 +0000
    -+++ new/lib/scatterlist.c	2012-01-10 23:01:21 +0000
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
-+ * May advance to the next dst_sg element. Hence, to copy into strictly
-+ * one dst_sg element, that element must either be the last in the
-+ * chain, or copy_len must equal dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
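
Common to every variant is the segment-validity rule enforced in
__blk_rq_map_kern_sg(): each segment's offset, and the length of every
segment but the last, must sit on the queue's DMA alignment, while an
unaligned total length is tolerated only when REQ_COPY_USER guarantees
tail padding space. A compact restatement; sg_segment_ok() is a
hypothetical helper mirroring the check in the patches above:

    #include <linux/blkdev.h>

    /*
     * Hypothetical restatement of the per-segment check in
     * __blk_rq_map_kern_sg(): the last entry's length is exempt, but
     * its offset still has to be aligned.
     */
    static bool sg_segment_ok(struct request_queue *q, size_t offset,
                              size_t len, bool is_last)
    {
            size_t l = is_last ? 0 : len;

            return ((offset | l) & queue_dma_alignment(q)) == 0;
    }

For example, with queue_dma_alignment(q) == 3 (4-byte alignment), a middle
segment of 512 bytes at offset 0 passes, a middle segment of 510 bytes
fails, and a trailing 510-byte segment passes only if the total request
length also clears the separate dma_pad_mask test.
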
    diff --git a/scst/kernel/scst_exec_req_fifo-3.3.patch b/scst/kernel/scst_exec_req_fifo-3.3.patch
    deleted file mode 100644
    index 293d96633..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.3.patch
    +++ /dev/null
    @@ -1,536 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2012-03-19 23:46:01 +0000
    -+++ new/block/blk-map.c	2012-03-20 00:10:37 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,339 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
-+		/* Decrement the count of in-flight bios and, once zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
    -+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
-+		 * We must limit the amount of copied data to to_copy, because
-+		 * the last element of sgl might not be marked as the end of
-+		 * the SG chain.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy,
    -+			KM_USER0, KM_USER1);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2012-03-19 23:46:01 +0000
    -+++ new/include/linux/blkdev.h	2012-03-20 00:10:37 +0000
    -@@ -612,6 +612,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -731,6 +733,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2012-03-19 23:46:01 +0000
    -+++ new/include/linux/scatterlist.h	2012-03-20 00:10:37 +0000
    -@@ -3,6 +3,7 @@
    - 
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
    -@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2012-03-19 23:46:01 +0000
    -+++ new/lib/scatterlist.c	2012-03-20 00:10:37 +0000
    -@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len,
    -+			enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page +
    -+					 (src_offs >> PAGE_SHIFT), s_km_type) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page +
    -+					(dst_offs >> PAGE_SHIFT), d_km_type) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr, s_km_type);
    -+		kunmap_atomic(daddr, d_km_type);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ * @d_km_type:	kmap_atomic type for the destination SG
    -+ * @s_km_type:	kmap_atomic type for the source SG
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len,
    -+	    enum km_type d_km_type, enum km_type s_km_type)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len, d_km_type, s_km_type);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
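Note: the patch above is the last variant written against the pre-3.4 kmap
API, which is why its sg_copy() takes enum km_type arguments (KM_USER0,
KM_BIO_SRC_IRQ, and so on). Kernel 3.4 removed that parameter from
kmap_atomic()/kunmap_atomic(), so the 3.4 and later variants deleted below
declare sg_copy() without the d_km_type/s_km_type arguments. A minimal
sketch of the API difference (illustrative only, not part of the patches):

    /* Pre-3.4: the caller names the fixed kmap slot explicitly. */
    void *addr = kmap_atomic(page, KM_USER0);
    memcpy(buf, addr, n);
    kunmap_atomic(addr, KM_USER0);

    /* 3.4 and later: slots are managed implicitly, stack-like. */
    void *vaddr = kmap_atomic(page);
    memcpy(buf, vaddr, n);
    kunmap_atomic(vaddr);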
    diff --git a/scst/kernel/scst_exec_req_fifo-3.4.patch b/scst/kernel/scst_exec_req_fifo-3.4.patch
    deleted file mode 100644
    index 53ac80d0c..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.4.patch
    +++ /dev/null
    @@ -1,528 +0,0 @@
    -diff --git a/block/blk-map.c b/block/blk-map.c
    -index 623e1cd..20349d0 100644
    ---- a/block/blk-map.c
    -+++ b/block/blk-map.c
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
    -index 4d4ac24..3fa6a30 100644
    ---- a/include/linux/blkdev.h
    -+++ b/include/linux/blkdev.h
    -@@ -609,6 +609,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -728,6 +730,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
    -index ac9586d..4b743d7 100644
    ---- a/include/linux/scatterlist.h
    -+++ b/include/linux/scatterlist.h
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include <asm/kmap_types.h>
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -220,6 +221,9 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -diff --git a/lib/scatterlist.c b/lib/scatterlist.c
    -index 6096e89..1786ca9 100644
    ---- a/lib/scatterlist.c
    -+++ b/lib/scatterlist.c
    -@@ -517,3 +517,126 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
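All of the remaining per-version variants export the same pair of symbols,
blk_rq_map_kern_sg() and blk_rq_unmap_kern_sg(), which SCST's pass-through
path used to attach an already-built scatterlist to a BLOCK_PC request. As
a rough illustration, a caller on a patched 3.x kernel might have looked
like the sketch below (the function name and the surrounding error handling
are hypothetical, not taken from SCST):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    /* Hypothetical example: issue a SCSI command whose data buffer is
     * described by an existing scatterlist. Assumes cdb_len <= BLK_MAX_CDB
     * and a kernel carrying the scst_exec_req_fifo patch. */
    static int issue_scsi_cmd_sg(struct request_queue *q, unsigned char *cdb,
                                 int cdb_len, struct scatterlist *sgl,
                                 int nents, int write)
    {
        struct request *rq;
        int res;

        rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
        if (rq == NULL)
            return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        memcpy(rq->cmd, cdb, cdb_len);
        rq->cmd_len = cdb_len;

        /* Maps sgl directly when alignment permits; otherwise the patch
         * falls back internally to a bounce copy (blk_rq_copy_kern_sg). */
        res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
        if (res != 0) {
            blk_put_request(rq);
            return res;
        }

        res = blk_execute_rq(q, NULL, rq, 0);  /* synchronous */
        blk_put_request(rq);
        return res;
    }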
    diff --git a/scst/kernel/scst_exec_req_fifo-3.5.patch b/scst/kernel/scst_exec_req_fifo-3.5.patch
    deleted file mode 100644
    index 78c3f0720..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.5.patch
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2012-08-08 02:57:29 +0000
    -+++ new/block/blk-map.c	2012-08-08 03:02:56 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2012-08-08 02:57:29 +0000
    -+++ new/include/linux/blkdev.h	2012-08-08 03:02:56 +0000
    -@@ -627,6 +627,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2012-08-08 02:57:29 +0000
    -+++ new/include/linux/scatterlist.h	2012-08-08 03:02:56 +0000
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include <asm/kmap_types.h>
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -220,6 +221,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2012-08-08 02:57:29 +0000
    -+++ new/lib/scatterlist.c	2012-08-08 03:02:56 +0000
    -@@ -517,3 +517,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
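The sg_copy() helper that each variant adds to lib/scatterlist.c is usable
independently of the block layer. With the 3.4+ signature (no km_type
arguments), a minimal hypothetical use looks like this:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Hypothetical example: copy one page of data between two
     * single-entry scatterlists via the sg_copy() added above. */
    static int copy_one_page(struct page *src_pg, struct page *dst_pg)
    {
        struct scatterlist src, dst;

        sg_init_table(&src, 1);
        sg_set_page(&src, src_pg, PAGE_SIZE, 0);
        sg_init_table(&dst, 1);
        sg_set_page(&dst, dst_pg, PAGE_SIZE, 0);

        /* nents_to_copy == 0 and copy_len == 0 both mean "no limit";
         * the return value is the number of bytes actually copied. */
        return sg_copy(&dst, &src, 0, 0);
    }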
    diff --git a/scst/kernel/scst_exec_req_fifo-3.6.patch b/scst/kernel/scst_exec_req_fifo-3.6.patch
    deleted file mode 100644
    index bf9cf76c0..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.6.patch
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2012-10-01 18:39:34 +0000
    -+++ new/block/blk-map.c	2012-10-01 20:50:07 +0000
    -@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h>		/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2012-10-01 18:39:34 +0000
    -+++ new/include/linux/blkdev.h	2012-10-01 18:45:47 +0000
    -@@ -638,6 +638,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -757,6 +759,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2012-10-01 18:39:34 +0000
    -+++ new/include/linux/scatterlist.h	2012-10-01 18:45:47 +0000
    -@@ -8,6 +8,7 @@
- #include <asm/types.h>
- #include <asm/scatterlist.h>
- #include <asm/io.h>
-+#include <asm/kmap_types.h>
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -224,6 +225,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2012-10-01 18:39:34 +0000
    -+++ new/lib/scatterlist.c	2012-10-01 20:50:07 +0000
    -@@ -573,3 +573,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
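
Note: each per-kernel-version patch deleted below carries a verbatim copy of
the same sg_copy() helper; only hunk offsets and timestamps differ between
them. For illustration, a minimal, hypothetical caller of the helper being
removed (pages a, b and c are assumed to have been obtained from
alloc_page(); the names are illustrative only):

	struct scatterlist src[2], dst[1];

	sg_init_table(src, 2);
	sg_set_page(&src[0], a, PAGE_SIZE, 0);
	sg_set_page(&src[1], b, PAGE_SIZE, 0);

	sg_init_table(dst, 1);
	sg_set_page(&dst[0], c, PAGE_SIZE, 0);

	/* Copies PAGE_SIZE bytes and then stops: the destination list is
	 * exhausted after its single entry. Returns bytes copied. */
	sg_copy(dst, src, 0 /* all entries */, 0 /* copy all */);
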
    diff --git a/scst/kernel/scst_exec_req_fifo-3.7.patch b/scst/kernel/scst_exec_req_fifo-3.7.patch
    deleted file mode 100644
    index 465558d8f..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.7.patch
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2012-12-17 19:41:04 +0000
    -+++ new/block/blk-map.c	2012-12-17 22:29:54 +0000
    -@@ -5,6 +5,8 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    -+#include 
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2012-12-17 19:41:04 +0000
    -+++ new/include/linux/blkdev.h	2012-12-17 22:29:54 +0000
    -@@ -660,6 +660,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -779,6 +781,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2012-12-17 19:41:04 +0000
    -+++ new/include/linux/scatterlist.h	2012-12-17 22:29:54 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -225,6 +226,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2012-12-17 19:41:04 +0000
    -+++ new/lib/scatterlist.c	2012-12-17 22:29:54 +0000
    -@@ -592,3 +592,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
    diff --git a/scst/kernel/scst_exec_req_fifo-3.8.patch b/scst/kernel/scst_exec_req_fifo-3.8.patch
    deleted file mode 100644
    index 0476a9331..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.8.patch
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2013-02-22 21:12:31 +0000
    -+++ new/block/blk-map.c	2013-02-23 00:07:57 +0000
    -@@ -5,6 +5,8 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    -+#include 
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2013-02-22 21:12:31 +0000
    -+++ new/include/linux/blkdev.h	2013-02-22 21:21:51 +0000
    -@@ -668,6 +668,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -787,6 +789,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2013-02-22 21:12:31 +0000
    -+++ new/include/linux/scatterlist.h	2013-02-22 21:21:51 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -225,6 +226,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2013-02-22 21:12:31 +0000
    -+++ new/lib/scatterlist.c	2013-02-23 00:07:57 +0000
    -@@ -593,3 +593,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
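
Note: sg_copy_elem() in these patches maps at most one page per iteration
even when an SG element covers several physically contiguous pages, so an
element's running offset can exceed PAGE_SIZE. A small, hypothetical helper
making the decomposition explicit (equivalent to what the loop does inline;
the mapping must be released with kunmap_atomic()):

	static void *sg_elem_kmap(struct scatterlist *sg, size_t offs)
	{
		/* Page index within the contiguous run, then the byte
		 * offset inside that page. */
		return kmap_atomic(sg_page(sg) + (offs >> PAGE_SHIFT)) +
		       (offs & ~PAGE_MASK);
	}
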
    diff --git a/scst/kernel/scst_exec_req_fifo-3.9.patch b/scst/kernel/scst_exec_req_fifo-3.9.patch
    deleted file mode 100644
    index fca368b29..000000000
    --- a/scst/kernel/scst_exec_req_fifo-3.9.patch
    +++ /dev/null
    @@ -1,527 +0,0 @@
    -=== modified file 'block/blk-map.c'
    ---- old/block/blk-map.c	2013-05-11 05:39:14 +0000
    -+++ new/block/blk-map.c	2013-05-14 01:25:01 +0000
    -@@ -5,6 +5,8 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    -+#include 
- #include <scsi/sg.h>	/* for struct sg_iovec */
    - 
    - #include "blk.h"
    -@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
    - }
    - EXPORT_SYMBOL(blk_rq_unmap_user);
    - 
    -+struct blk_kern_sg_work {
    -+	atomic_t bios_inflight;
    -+	struct sg_table sg_table;
    -+	struct scatterlist *src_sgl;
    -+};
    -+
    -+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    -+{
    -+	struct sg_table *sgt = &bw->sg_table;
    -+	struct scatterlist *sg;
    -+	int i;
    -+
    -+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    -+		struct page *pg = sg_page(sg);
    -+		if (pg == NULL)
    -+			break;
    -+		__free_page(pg);
    -+	}
    -+
    -+	sg_free_table(sgt);
    -+	kfree(bw);
    -+	return;
    -+}
    -+
    -+static void blk_bio_map_kern_endio(struct bio *bio, int err)
    -+{
    -+	struct blk_kern_sg_work *bw = bio->bi_private;
    -+
    -+	if (bw != NULL) {
    -+		/* Decrement the bios in processing and, if zero, free */
    -+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    -+		if (atomic_dec_and_test(&bw->bios_inflight)) {
    -+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
    -+				unsigned long flags;
    -+
    -+				local_irq_save(flags);	/* to protect KMs */
    -+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
    -+				local_irq_restore(flags);
    -+			}
    -+			blk_free_kern_sg_work(bw);
    -+		}
    -+	}
    -+
    -+	bio_put(bio);
    -+	return;
    -+}
    -+
    -+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			       int nents, struct blk_kern_sg_work **pbw,
    -+			       gfp_t gfp, gfp_t page_gfp)
    -+{
    -+	int res = 0, i;
    -+	struct scatterlist *sg;
    -+	struct scatterlist *new_sgl;
    -+	int new_sgl_nents;
    -+	size_t len = 0, to_copy;
    -+	struct blk_kern_sg_work *bw;
    -+
    -+	bw = kzalloc(sizeof(*bw), gfp);
    -+	if (bw == NULL)
    -+		goto out;
    -+
    -+	bw->src_sgl = sgl;
    -+
    -+	for_each_sg(sgl, sg, nents, i)
    -+		len += sg->length;
    -+	to_copy = len;
    -+
    -+	new_sgl_nents = PFN_UP(len);
    -+
    -+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
    -+	if (res != 0)
    -+		goto err_free;
    -+
    -+	new_sgl = bw->sg_table.sgl;
    -+
    -+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    -+		struct page *pg;
    -+
    -+		pg = alloc_page(page_gfp);
    -+		if (pg == NULL)
    -+			goto err_free;
    -+
    -+		sg_assign_page(sg, pg);
    -+		sg->length = min_t(size_t, PAGE_SIZE, len);
    -+
    -+		len -= PAGE_SIZE;
    -+	}
    -+
    -+	if (rq_data_dir(rq) == WRITE) {
    -+		/*
    -+		 * We need to limit amount of copied data to to_copy, because
    -+		 * sgl might have the last element in sgl not marked as last in
    -+		 * SG chaining.
    -+		 */
    -+		sg_copy(new_sgl, sgl, 0, to_copy);
    -+	}
    -+
    -+	*pbw = bw;
    -+	/*
    -+	 * REQ_COPY_USER name is misleading. It should be something like
    -+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
    -+	 */
    -+	rq->cmd_flags |= REQ_COPY_USER;
    -+
    -+out:
    -+	return res;
    -+
    -+err_free:
    -+	blk_free_kern_sg_work(bw);
    -+	res = -ENOMEM;
    -+	goto out;
    -+}
    -+
    -+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
    -+{
    -+	int res;
    -+	struct request_queue *q = rq->q;
    -+	int rw = rq_data_dir(rq);
    -+	int max_nr_vecs, i;
    -+	size_t tot_len;
    -+	bool need_new_bio;
    -+	struct scatterlist *sg, *prev_sg = NULL;
    -+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    -+	int bios;
    -+
    -+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
    -+		WARN_ON(1);
    -+		res = -EINVAL;
    -+		goto out;
    -+	}
    -+
    -+	/*
    -+	 * Let's keep each bio allocation inside a single page to decrease
    -+	 * probability of failure.
    -+	 */
    -+	max_nr_vecs =  min_t(size_t,
    -+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
    -+		BIO_MAX_PAGES);
    -+
    -+	need_new_bio = true;
    -+	tot_len = 0;
    -+	bios = 0;
    -+	for_each_sg(sgl, sg, nents, i) {
    -+		struct page *page = sg_page(sg);
    -+		void *page_addr = page_address(page);
    -+		size_t len = sg->length, l;
    -+		size_t offset = sg->offset;
    -+
    -+		tot_len += len;
    -+		prev_sg = sg;
    -+
    -+		/*
    -+		 * Each segment must be aligned on DMA boundary and
    -+		 * not on stack. The last one may have unaligned
    -+		 * length as long as the total length is aligned to
    -+		 * DMA padding alignment.
    -+		 */
    -+		if (i == nents - 1)
    -+			l = 0;
    -+		else
    -+			l = len;
    -+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    -+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    -+			res = -EINVAL;
    -+			goto out_free_bios;
    -+		}
    -+
    -+		while (len > 0) {
    -+			size_t bytes;
    -+			int rc;
    -+
    -+			if (need_new_bio) {
    -+				bio = bio_kmalloc(gfp, max_nr_vecs);
    -+				if (bio == NULL) {
    -+					res = -ENOMEM;
    -+					goto out_free_bios;
    -+				}
    -+
    -+				if (rw == WRITE)
    -+					bio->bi_rw |= REQ_WRITE;
    -+
    -+				bios++;
    -+				bio->bi_private = bw;
    -+				bio->bi_end_io = blk_bio_map_kern_endio;
    -+
    -+				if (hbio == NULL)
    -+					hbio = tbio = bio;
    -+				else
    -+					tbio = tbio->bi_next = bio;
    -+			}
    -+
    -+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    -+
    -+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    -+			if (rc < bytes) {
    -+				if (unlikely(need_new_bio || (rc < 0))) {
    -+					if (rc < 0)
    -+						res = rc;
    -+					else
    -+						res = -EIO;
    -+					goto out_free_bios;
    -+				} else {
    -+					need_new_bio = true;
    -+					len -= rc;
    -+					offset += rc;
    -+					continue;
    -+				}
    -+			}
    -+
    -+			need_new_bio = false;
    -+			offset = 0;
    -+			len -= bytes;
    -+			page = nth_page(page, 1);
    -+		}
    -+	}
    -+
    -+	if (hbio == NULL) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	/* Total length must be aligned on DMA padding alignment */
    -+	if ((tot_len & q->dma_pad_mask) &&
    -+	    !(rq->cmd_flags & REQ_COPY_USER)) {
    -+		res = -EINVAL;
    -+		goto out_free_bios;
    -+	}
    -+
    -+	if (bw != NULL)
    -+		atomic_set(&bw->bios_inflight, bios);
    -+
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio->bi_next = NULL;
    -+
    -+		blk_queue_bounce(q, &bio);
    -+
    -+		res = blk_rq_append_bio(q, rq, bio);
    -+		if (unlikely(res != 0)) {
    -+			bio->bi_next = hbio;
    -+			hbio = bio;
    -+			/* We can have one or more bios bounced */
    -+			goto out_unmap_bios;
    -+		}
    -+	}
    -+
    -+	res = 0;
    -+
    -+	rq->buffer = NULL;
    -+out:
    -+	return res;
    -+
    -+out_unmap_bios:
    -+	blk_rq_unmap_kern_sg(rq, res);
    -+
    -+out_free_bios:
    -+	while (hbio != NULL) {
    -+		bio = hbio;
    -+		hbio = hbio->bi_next;
    -+		bio_put(bio);
    -+	}
    -+	goto out;
    -+}
    -+
    -+/**
    -+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
    -+ * @rq:		request to fill
    -+ * @sgl:	area to map
    -+ * @nents:	number of elements in @sgl
    -+ * @gfp:	memory allocation flags
    -+ *
    -+ * Description:
    -+ *    Data will be mapped directly if possible. Otherwise a bounce
    -+ *    buffer will be used.
    -+ */
    -+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+		       int nents, gfp_t gfp)
    -+{
    -+	int res;
    -+
    -+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
    -+	if (unlikely(res != 0)) {
    -+		struct blk_kern_sg_work *bw = NULL;
    -+
    -+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
    -+				gfp, rq->q->bounce_gfp | gfp);
    -+		if (unlikely(res != 0))
    -+			goto out;
    -+
    -+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
    -+				bw->sg_table.nents, bw, gfp);
    -+		if (res != 0) {
    -+			blk_free_kern_sg_work(bw);
    -+			goto out;
    -+		}
    -+	}
    -+
    -+	rq->buffer = NULL;
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(blk_rq_map_kern_sg);
    -+
    -+/**
    -+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
    -+ * @rq:		request to unmap
    -+ * @err:	non-zero error code
    -+ *
    -+ * Description:
    -+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
    -+ *    only in case of an error!
    -+ */
    -+void blk_rq_unmap_kern_sg(struct request *rq, int err)
    -+{
    -+	struct bio *bio = rq->bio;
    -+
    -+	while (bio) {
    -+		struct bio *b = bio;
    -+		bio = bio->bi_next;
    -+		b->bi_end_io(b, err);
    -+	}
    -+	rq->bio = NULL;
    -+
    -+	return;
    -+}
    -+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
    -+
    - /**
    -  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
    -  * @q:		request queue where request should be inserted
    -
    -=== modified file 'include/linux/blkdev.h'
    ---- old/include/linux/blkdev.h	2013-05-11 05:39:14 +0000
    -+++ new/include/linux/blkdev.h	2013-05-11 05:48:04 +0000
    -@@ -670,6 +670,8 @@ extern unsigned long blk_max_low_pfn, bl
    - #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
    - #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
    - 
    -+#define SCSI_EXEC_REQ_FIFO_DEFINED
    -+
    - #ifdef CONFIG_BOUNCE
    - extern int init_emergency_isa_pool(void);
    - extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
    -@@ -789,6 +791,9 @@ extern int blk_rq_map_kern(struct reques
    - extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
    - 			       struct rq_map_data *, struct sg_iovec *, int,
    - 			       unsigned int, gfp_t);
    -+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
    -+			      int nents, gfp_t gfp);
    -+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
    - extern int blk_execute_rq(struct request_queue *, struct gendisk *,
    - 			  struct request *, int);
    - extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
    -
    -=== modified file 'include/linux/scatterlist.h'
    ---- old/include/linux/scatterlist.h	2013-05-11 05:39:14 +0000
    -+++ new/include/linux/scatterlist.h	2013-05-11 05:48:04 +0000
    -@@ -8,6 +8,7 @@
    - #include 
    - #include 
    - #include 
    -+#include 
    - 
    - struct sg_table {
    - 	struct scatterlist *sgl;	/* the list */
    -@@ -225,6 +226,9 @@ size_t sg_copy_from_buffer(struct scatte
    - size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    - 			 void *buf, size_t buflen);
    - 
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len);
    -+
    - /*
    -  * Maximum number of entries that will be allocated in one piece, if
    -  * a list larger than this is required then chaining will be utilized.
    -
    -=== modified file 'lib/scatterlist.c'
    ---- old/lib/scatterlist.c	2013-05-11 05:39:14 +0000
    -+++ new/lib/scatterlist.c	2013-05-14 01:25:01 +0000
    -@@ -629,3 +629,126 @@ size_t sg_copy_to_buffer(struct scatterl
    - 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
    - }
    - EXPORT_SYMBOL(sg_copy_to_buffer);
    -+
    -+/*
    -+ * Can switch to the next dst_sg element, so, to copy to strictly only
    -+ * one dst_sg element, it must be either last in the chain, or
    -+ * copy_len == dst_sg->length.
    -+ */
    -+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
    -+			size_t *pdst_offs, struct scatterlist *src_sg,
    -+			size_t copy_len)
    -+{
    -+	int res = 0;
    -+	struct scatterlist *dst_sg;
    -+	size_t src_len, dst_len, src_offs, dst_offs;
    -+	struct page *src_page, *dst_page;
    -+
    -+	dst_sg = *pdst_sg;
    -+	dst_len = *pdst_len;
    -+	dst_offs = *pdst_offs;
    -+	dst_page = sg_page(dst_sg);
    -+
    -+	src_page = sg_page(src_sg);
    -+	src_len = src_sg->length;
    -+	src_offs = src_sg->offset;
    -+
    -+	do {
    -+		void *saddr, *daddr;
    -+		size_t n;
    -+
    -+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
    -+				    (src_offs & ~PAGE_MASK);
    -+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
    -+				    (dst_offs & ~PAGE_MASK);
    -+
    -+		if (((src_offs & ~PAGE_MASK) == 0) &&
    -+		    ((dst_offs & ~PAGE_MASK) == 0) &&
    -+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
    -+		    (copy_len >= PAGE_SIZE)) {
    -+			copy_page(daddr, saddr);
    -+			n = PAGE_SIZE;
    -+		} else {
    -+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
    -+					  PAGE_SIZE - (src_offs & ~PAGE_MASK));
    -+			n = min(n, src_len);
    -+			n = min(n, dst_len);
    -+			n = min_t(size_t, n, copy_len);
    -+			memcpy(daddr, saddr, n);
    -+		}
    -+		dst_offs += n;
    -+		src_offs += n;
    -+
    -+		kunmap_atomic(saddr);
    -+		kunmap_atomic(daddr);
    -+
    -+		res += n;
    -+		copy_len -= n;
    -+		if (copy_len == 0)
    -+			goto out;
    -+
    -+		src_len -= n;
    -+		dst_len -= n;
    -+		if (dst_len == 0) {
    -+			dst_sg = sg_next(dst_sg);
    -+			if (dst_sg == NULL)
    -+				goto out;
    -+			dst_page = sg_page(dst_sg);
    -+			dst_len = dst_sg->length;
    -+			dst_offs = dst_sg->offset;
    -+		}
    -+	} while (src_len > 0);
    -+
    -+out:
    -+	*pdst_sg = dst_sg;
    -+	*pdst_len = dst_len;
    -+	*pdst_offs = dst_offs;
    -+	return res;
    -+}
    -+
    -+/**
    -+ * sg_copy - copy one SG vector to another
    -+ * @dst_sg:	destination SG
    -+ * @src_sg:	source SG
    -+ * @nents_to_copy: maximum number of entries to copy
    -+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
    -+ *
    -+ * Description:
    -+ *    Data from the source SG vector will be copied to the destination SG
    -+ *    vector. End of the vectors will be determined by sg_next() returning
    -+ *    NULL. Returns number of bytes copied.
    -+ */
    -+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
    -+	    int nents_to_copy, size_t copy_len)
    -+{
    -+	int res = 0;
    -+	size_t dst_len, dst_offs;
    -+
    -+	if (copy_len == 0)
    -+		copy_len = 0x7FFFFFFF; /* copy all */
    -+
    -+	if (nents_to_copy == 0)
    -+		nents_to_copy = 0x7FFFFFFF; /* copy all */
    -+
    -+	dst_len = dst_sg->length;
    -+	dst_offs = dst_sg->offset;
    -+
    -+	do {
    -+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
    -+				src_sg, copy_len);
    -+		copy_len -= copied;
    -+		res += copied;
    -+		if ((copy_len == 0) || (dst_sg == NULL))
    -+			goto out;
    -+
    -+		nents_to_copy--;
    -+		if (nents_to_copy == 0)
    -+			goto out;
    -+
    -+		src_sg = sg_next(src_sg);
    -+	} while (src_sg != NULL);
    -+
    -+out:
    -+	return res;
    -+}
    -+EXPORT_SYMBOL(sg_copy);
    -
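
Note: SCSI_EXEC_REQ_FIFO_DEFINED was only ever defined by the
include/linux/blkdev.h hunk of the kernel patches deleted above, so once
those patches are gone the macro can never be set. The changes below
therefore relax the preprocessor guards; a condensed view of the rewrite
(illustrative comment, not part of the sources):

	/*
	 * scst_disk.c:
	 *  before: #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && \
	 *              defined(SCSI_EXEC_REQ_FIFO_DEFINED)
	 *  after:  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
	 *
	 * scst_lib.c (negated form, rewritten per De Morgan; the
	 * !defined(HAVE_SG_COPY) clause is dropped as well):
	 *  before: #if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && \
	 *              defined(SCSI_EXEC_REQ_FIFO_DEFINED)) && !defined(HAVE_SG_COPY)
	 *  after:  #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || \
	 *              !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
	 */
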
    diff --git a/scst/src/dev_handlers/scst_disk.c b/scst/src/dev_handlers/scst_disk.c
    index 555c8c093..ad58fc438 100644
    --- a/scst/src/dev_handlers/scst_disk.c
    +++ b/scst/src/dev_handlers/scst_disk.c
    @@ -47,7 +47,7 @@ static void disk_detach(struct scst_device *dev);
     static int disk_parse(struct scst_cmd *cmd);
     static int disk_perf_exec(struct scst_cmd *cmd);
     static int disk_done(struct scst_cmd *cmd);
    -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     static int disk_exec(struct scst_cmd *cmd);
     static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd);
     #endif
    @@ -61,7 +61,7 @@ static struct scst_dev_type disk_devtype = {
     	.attach =		disk_attach,
     	.detach =		disk_detach,
     	.parse =		disk_parse,
    -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     	.exec =			disk_exec,
     	.on_sg_tablesize_low = disk_on_sg_tablesize_low,
     #endif
    @@ -82,7 +82,7 @@ static struct scst_dev_type disk_devtype_perf = {
     	.parse =		disk_parse,
     	.exec =			disk_perf_exec,
     	.dev_done =		disk_done,
    -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     	.on_sg_tablesize_low = disk_on_sg_tablesize_low,
     #endif
     #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
    @@ -293,7 +293,7 @@ static int disk_done(struct scst_cmd *cmd)
     	return res;
     }
     
    -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     
     static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd)
     {
    @@ -544,7 +544,7 @@ out_error:
     	goto out_done;
     }
     
    -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
    +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
     
     static int disk_perf_exec(struct scst_cmd *cmd)
     {
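
Note: with the kernel patches removed, scst_lib.c below gains private copies
of the request-mapping helpers (blk_map_kern_sg() and friends) that the
patches formerly added to block/blk-map.c. A hypothetical caller sketch, for
illustration only — the helper is static to scst_lib.c, and the pre-blk-mq
request interface (rq->cmd, rq->cmd_len, blk_execute_rq()) is assumed:

	static int example_read_capacity(struct scsi_device *sdev,
					 struct scatterlist *sgl, int nents)
	{
		struct request_queue *q = sdev->request_queue;
		struct request *rq;
		int res;

		/* Map sgl directly; fall back to a bounce buffer. */
		rq = blk_map_kern_sg(q, sgl, nents, GFP_KERNEL, true);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		rq->cmd[0] = READ_CAPACITY;	/* 0x25 */
		rq->cmd_len = 10;
		rq->timeout = 60 * HZ;

		res = blk_execute_rq(q, NULL, rq, 0);

		blk_put_request(rq);
		return res;
	}
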
    diff --git a/scst/src/scst_lib.c b/scst/src/scst_lib.c
    index de879a2b4..a344cfb6f 100644
    --- a/scst/src/scst_lib.c
    +++ b/scst/src/scst_lib.c
    @@ -119,7 +119,8 @@ int hex_to_bin(char ch)
     EXPORT_SYMBOL(hex_to_bin);
     #endif
     
    -#if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) && !defined(HAVE_SG_COPY)
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || \
    +	!defined(SCSI_EXEC_REQ_FIFO_DEFINED)
     static int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
     #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
     	    int nents_to_copy, size_t copy_len,
    @@ -6221,7 +6222,371 @@ out:
     	return;
     }
     
    -#if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) && !defined(HAVE_SG_COPY)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
    +struct blk_kern_sg_work {
    +	atomic_t bios_inflight;
    +	struct sg_table sg_table;
    +	struct scatterlist *src_sgl;
    +};
    +
    +static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
    +{
    +	struct sg_table *sgt = &bw->sg_table;
    +	struct scatterlist *sg;
    +	struct page *pg;
    +	int i;
    +
    +	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
    +		pg = sg_page(sg);
    +		if (pg == NULL)
    +			break;
    +		__free_page(pg);
    +	}
    +
    +	sg_free_table(sgt);
    +	kfree(bw);
    +	return;
    +}
    +
    +static void blk_bio_map_kern_endio(struct bio *bio, int err)
    +{
    +	struct blk_kern_sg_work *bw = bio->bi_private;
    +
    +	if (bw != NULL) {
+		/* Decrement the count of bios in flight; free bw at zero */
    +		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
    +		if (atomic_dec_and_test(&bw->bios_inflight)) {
    +			if (bio_data_dir(bio) == READ && err == 0) {
    +				unsigned long flags;
    +
    +				local_irq_save(flags);	/* to protect KMs */
    +				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
    +					, KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ
    +#endif
    +					);
    +				local_irq_restore(flags);
    +			}
    +			blk_free_kern_sg_work(bw);
    +		}
    +	}
    +
    +	bio_put(bio);
    +	return;
    +}
    +
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
    +/*
    + * See also patch "block: Add blk_make_request(), takes bio, returns a
    + * request" (commit 79eb63e9e5875b84341a3a05f8e6ae9cdb4bb6f6).
    + */
    +static struct request *blk_make_request(struct request_queue *q,
    +					struct bio *bio,
    +					gfp_t gfp_mask)
    +{
    +	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
    +
    +	if (unlikely(!rq))
    +		return ERR_PTR(-ENOMEM);
    +
    +	rq->cmd_type = REQ_TYPE_BLOCK_PC;
    +
    +	for ( ; bio; bio = bio->bi_next) {
    +		struct bio *bounce_bio = bio;
    +		int ret;
    +
    +		blk_queue_bounce(q, &bounce_bio);
    +		ret = blk_rq_append_bio(q, rq, bounce_bio);
    +		if (unlikely(ret)) {
    +			blk_put_request(rq);
    +			return ERR_PTR(ret);
    +		}
    +	}
    +
    +	return rq;
    +}
    +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
    +
    +/*
    + * Copy an sg-list. This function is related to bio_copy_kern() but duplicates
    + * an sg-list instead of creating a bio out of a single kernel address range.
    + */
    +static struct blk_kern_sg_work *blk_copy_kern_sg(struct request_queue *q,
    +	struct scatterlist *sgl, int nents, gfp_t gfp_mask, bool reading)
    +{
    +	int res = 0, i;
    +	struct scatterlist *sg;
    +	struct scatterlist *new_sgl;
    +	int new_sgl_nents;
    +	size_t len = 0, to_copy;
    +	struct blk_kern_sg_work *bw;
    +
    +	res = -ENOMEM;
    +	bw = kzalloc(sizeof(*bw), gfp_mask);
    +	if (bw == NULL)
    +		goto err;
    +
    +	bw->src_sgl = sgl;
    +
    +	for_each_sg(sgl, sg, nents, i)
    +		len += sg->length;
    +	to_copy = len;
    +
    +	new_sgl_nents = PFN_UP(len);
    +
    +	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp_mask);
    +	if (res != 0)
    +		goto err_free_bw;
    +
    +	new_sgl = bw->sg_table.sgl;
    +
    +	res = -ENOMEM;
    +	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
    +		struct page *pg;
    +
    +		pg = alloc_page(q->bounce_gfp | gfp_mask);
+		if (pg == NULL)
+			goto err_free_bw;
    +
    +		sg_assign_page(sg, pg);
    +		sg->length = min_t(size_t, PAGE_SIZE, len);
    +
    +		len -= PAGE_SIZE;
    +	}
    +
    +	if (!reading) {
+		/*
+		 * The amount of copied data must be limited to to_copy,
+		 * because the last element of sgl might not be marked as
+		 * the last element of an SG chain.
+		 */
    +		sg_copy(new_sgl, sgl, 0, to_copy
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
    +			, KM_USER0, KM_USER1
    +#endif
    +			);
    +	}
    +
    +out:
    +	return bw;
    +
    +
    +err_free_bw:
    +	blk_free_kern_sg_work(bw);
    +
    +err:
    +	sBUG_ON(res == 0);
    +	bw = ERR_PTR(res);
    +	goto out;
    +}
    +
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
    +static void bio_kmalloc_destructor(struct bio *bio)
    +{
    +	kfree(bio->bi_io_vec);
    +	kfree(bio);
    +}
    +#endif
    +
    +/* __blk_map_kern_sg - map kernel data to a request for REQ_TYPE_BLOCK_PC */
    +static struct request *__blk_map_kern_sg(struct request_queue *q,
    +	struct scatterlist *sgl, int nents, struct blk_kern_sg_work *bw,
    +	gfp_t gfp_mask, bool reading)
    +{
    +	struct request *rq;
    +	int max_nr_vecs, i;
    +	size_t tot_len;
    +	bool need_new_bio;
    +	struct scatterlist *sg;
    +	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
    +	int bios;
    +
    +	if (unlikely(sgl == NULL || sgl->length == 0 || nents <= 0)) {
    +		WARN_ON_ONCE(true);
    +		rq = ERR_PTR(-EINVAL);
    +		goto out;
    +	}
    +
    +	/*
    +	 * Restrict bio size to a single page to minimize the probability that
    +	 * bio allocation fails.
    +	 */
    +	max_nr_vecs = min_t(int,
    +		(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
    +		BIO_MAX_PAGES);
    +
    +	need_new_bio = true;
    +	tot_len = 0;
    +	bios = 0;
    +	for_each_sg(sgl, sg, nents, i) {
    +		struct page *page = sg_page(sg);
    +		void *page_addr = page_address(page);
    +		size_t len = sg->length, l;
    +		size_t offset = sg->offset;
    +
    +		tot_len += len;
    +
    +		/*
+		 * Each segment must be DMA-aligned and must not reside on
+		 * the stack. The last segment may have unaligned length as
    +		 * long as the total length satisfies the DMA padding
    +		 * alignment requirements.
    +		 */
    +		if (i == nents - 1)
    +			l = 0;
    +		else
    +			l = len;
    +		if (((sg->offset | l) & queue_dma_alignment(q)) ||
    +		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
    +			rq = ERR_PTR(-EINVAL);
    +			goto out_free_bios;
    +		}
    +
    +		while (len > 0) {
    +			size_t bytes;
    +			int rc;
    +
    +			if (need_new_bio) {
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
    +				bio = bio_alloc_bioset(gfp_mask, max_nr_vecs, NULL);
    +				if (bio)
    +					bio->bi_destructor =
    +						bio_kmalloc_destructor;
    +#else
    +				bio = bio_kmalloc(gfp_mask, max_nr_vecs);
    +#endif
    +				if (bio == NULL) {
    +					rq = ERR_PTR(-ENOMEM);
    +					goto out_free_bios;
    +				}
    +
    +				if (!reading)
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
    +					bio->bi_rw |= 1 << BIO_RW;
    +#else
    +					bio->bi_rw |= REQ_WRITE;
    +#endif
    +				bios++;
    +				bio->bi_private = bw;
    +				bio->bi_end_io = blk_bio_map_kern_endio;
    +
    +				if (hbio == NULL)
    +					hbio = bio;
    +				else
    +					tbio->bi_next = bio;
    +				tbio = bio;
    +			}
    +
    +			bytes = min_t(size_t, len, PAGE_SIZE - offset);
    +
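    +			/*
    +			 * bio_add_pc_page() may add fewer bytes than
    +			 * requested when the bio is full; continue with the
    +			 * remainder in a new bio.
    +			 */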
    +			rc = bio_add_pc_page(q, bio, page, bytes, offset);
    +			if (rc < bytes) {
    +				if (unlikely(need_new_bio || rc < 0)) {
    +					rq = ERR_PTR(rc < 0 ? rc : -EIO);
    +					goto out_free_bios;
    +				} else {
    +					need_new_bio = true;
    +					len -= rc;
    +					offset += rc;
    +				}
    +			} else {
    +				need_new_bio = false;
    +				offset = 0;
    +				len -= bytes;
    +				page = nth_page(page, 1);
    +			}
    +		}
    +	}
    +
    +	if (hbio == NULL) {
    +		rq = ERR_PTR(-EINVAL);
    +		goto out_free_bios;
    +	}
    +
    +	/* Total length must satisfy DMA padding alignment */
    +	if ((tot_len & q->dma_pad_mask) && bw != NULL) {
    +		rq = ERR_PTR(-EINVAL);
    +		goto out_free_bios;
    +	}
    +
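    +	/* Assemble the bio chain into a single request. */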
    +	rq = blk_make_request(q, hbio, gfp_mask);
    +	if (unlikely(IS_ERR(rq)))
    +		goto out_free_bios;
    +
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
    +	/*
    +	 * See also patch "block: add blk_rq_set_block_pc()" (commit
    +	 * f27b087b81b7).
    +	 */
    +	rq->cmd_type = REQ_TYPE_BLOCK_PC;
    +#endif
    +
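    +	/* For bounced requests, count in-flight bios for the completion path. */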
    +	if (bw != NULL) {
    +		atomic_set(&bw->bios_inflight, bios);
    +		rq->cmd_flags |= REQ_COPY_USER;
    +	}
    +
    +out:
    +	return rq;
    +
    +out_free_bios:
    +	while (hbio != NULL) {
    +		bio = hbio;
    +		hbio = hbio->bi_next;
    +		bio_put(bio);
    +	}
    +	goto out;
    +}
    +
    +/**
    + * blk_map_kern_sg - map kernel data to a request for REQ_TYPE_BLOCK_PC
    + * @q:		request queue for the new request
    + * @sgl:	area to map
    + * @nents:	number of elements in @sgl
    + * @gfp:	memory allocation flags
    + * @reading:	whether data flows from the device into @sgl
    + *
    + * Description:
    + *    Data will be mapped directly if possible. Otherwise a bounce
    + *    buffer will be used.
    + */
    +static struct request *blk_map_kern_sg(struct request_queue *q,
    +		struct scatterlist *sgl, int nents, gfp_t gfp, bool reading)
    +{
    +	struct request *rq;
    +
    +	if (!sgl) {
    +		rq = blk_get_request(q, reading ? READ : WRITE, gfp);
    +		if (unlikely(!rq))
    +			return ERR_PTR(-ENOMEM);
    +
    +		rq->cmd_type = REQ_TYPE_BLOCK_PC;
    +		goto out;
    +	}
    +
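    +	/* Try zero-copy mapping of the SG list first. */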
    +	rq = __blk_map_kern_sg(q, sgl, nents, NULL, gfp, reading);
    +	if (unlikely(IS_ERR(rq))) {
    +		struct blk_kern_sg_work *bw;
    +
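    +		/* Direct mapping failed; fall back to a bounce buffer. */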
    +		bw = blk_copy_kern_sg(q, sgl, nents, gfp, reading);
    +		if (unlikely(IS_ERR(bw))) {
    +			rq = ERR_CAST(bw);
    +			goto out;
    +		}
    +
    +		rq = __blk_map_kern_sg(q, bw->sg_table.sgl, bw->sg_table.nents,
    +				       bw, gfp, reading);
    +		if (IS_ERR(rq)) {
    +			blk_free_kern_sg_work(bw);
    +			goto out;
    +		}
    +	}
    +
    +out:
    +	return rq;
    +}
    +#endif
     
     /*
      * Can switch to the next dst_sg element, so, to copy to strictly only
    @@ -6375,15 +6740,20 @@ out:
     	return res;
     }
     
    -#endif /* !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
    -
    -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     static void scsi_end_async(struct request *req, int error)
     {
     	struct scsi_io_context *sioc = req->end_io_data;
     
     	TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
     
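    +	/* With blk-mq (>= 3.17), completion may run without the queue lock. */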
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
    +	lockdep_assert_held(req->q->queue_lock);
    +#else
    +	if (!req->q->mq_ops)
    +		lockdep_assert_held(req->q->queue_lock);
    +#endif
    +
     	if (sioc->done)
     #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
     		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
    @@ -6410,7 +6780,7 @@ int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
     	struct request_queue *q = cmd->dev->scsi_dev->request_queue;
     	struct request *rq;
     	struct scsi_io_context *sioc;
    -	int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
    +	bool reading = !(cmd->data_direction & SCST_DATA_WRITE);
     	gfp_t gfp = cmd->cmd_gfp_mask;
     	int cmd_len = cmd->cdb_len;
     
    @@ -6420,54 +6790,38 @@ int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
     		goto out;
     	}
     
    -	rq = blk_get_request(q, write, gfp);
    -	if (rq == NULL) {
    -		res = -ENOMEM;
    -		goto out_free_sioc;
    -	}
    -
    -	rq->cmd_type = REQ_TYPE_BLOCK_PC;
    -	rq->cmd_flags |= REQ_QUIET;
    -
    -	if (cmd->sg == NULL)
    -		goto done;
    -
     	if (cmd->data_direction == SCST_DATA_BIDI) {
     		struct request *next_rq;
     
     		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
     			res = -EOPNOTSUPP;
    -			goto out_free_rq;
    +			goto out;
     		}
     
    -		res = blk_rq_map_kern_sg(rq, cmd->out_sg, cmd->out_sg_cnt, gfp);
    -		if (res != 0) {
    -			TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
    -			goto out_free_rq;
    +		rq = blk_map_kern_sg(q, cmd->out_sg, cmd->out_sg_cnt, gfp,
    +				     reading);
    +		if (IS_ERR(rq)) {
    +			res = PTR_ERR(rq);
    +			TRACE_DBG("blk_map_kern_sg() failed: %d", res);
    +			goto out;
     		}
     
    -		next_rq = blk_get_request(q, READ, gfp);
    -		if (next_rq == NULL) {
    -			res = -ENOMEM;
    +		next_rq = blk_map_kern_sg(q, cmd->sg, cmd->sg_cnt, gfp, false);
    +		if (IS_ERR(next_rq)) {
    +			res = PTR_ERR(next_rq);
    +			TRACE_DBG("blk_map_kern_sg() failed: %d", res);
     			goto out_free_unmap;
     		}
     		rq->next_rq = next_rq;
    -		next_rq->cmd_type = rq->cmd_type;
    -
    -		res = blk_rq_map_kern_sg(next_rq, cmd->sg, cmd->sg_cnt, gfp);
    -		if (res != 0) {
    -			TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
    -			goto out_free_unmap;
    -		}
     	} else {
    -		res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
    -		if (res != 0) {
    -			TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
    -			goto out_free_rq;
    +		rq = blk_map_kern_sg(q, cmd->sg, cmd->sg_cnt, gfp, reading);
    +		if (IS_ERR(rq)) {
    +			res = PTR_ERR(rq);
    +			TRACE_DBG("blk_map_kern_sg() failed: %d", res);
    +			goto out;
     		}
     	}
     
    -done:
     	TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
     
     	sioc->data = data;
    @@ -6485,6 +6839,7 @@ done:
     	rq->timeout = cmd->timeout;
     	rq->retries = cmd->retries;
     	rq->end_io_data = sioc;
    +	rq->cmd_flags |= REQ_QUIET;
     
     	blk_execute_rq_nowait(rq->q, NULL, rq,
     		(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
    @@ -6492,22 +6847,23 @@ out:
     	return res;
     
     out_free_unmap:
    -	if (rq->next_rq != NULL) {
    -		blk_put_request(rq->next_rq);
    -		rq->next_rq = NULL;
    +	{
    +	struct bio *bio = rq->bio, *b;
    +
    +	while (bio) {
    +		b = bio;
    +		bio = bio->bi_next;
    +		b->bi_end_io(b, res);
     	}
    -	blk_rq_unmap_kern_sg(rq, res);
    +	}
    +	rq->bio = NULL;
     
    -out_free_rq:
     	blk_put_request(rq);
    -
    -out_free_sioc:
    -	kmem_cache_free(scsi_io_context_cache, sioc);
     	goto out;
     }
     EXPORT_SYMBOL(scst_scsi_exec_async);
     
    -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
    +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
     
     /**
      * scst_copy_sg() - copy data between the command's SGs
    diff --git a/scst/src/scst_main.c b/scst/src/scst_main.c
    index 454c4b3f4..093905358 100644
    --- a/scst/src/scst_main.c
    +++ b/scst/src/scst_main.c
    @@ -47,18 +47,13 @@ option or use a 64-bit configuration instead. See README file for \
     details.
     #endif
     
    -#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
    -#if !defined(CONFIG_SCST_STRICT_SERIALIZING)
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) && \
    +	!defined(SCSI_EXEC_REQ_FIFO_DEFINED) &&	     \
    +	!defined(CONFIG_SCST_STRICT_SERIALIZING)
     #warning Patch scst_exec_req_fifo- was not applied on \
     your kernel and CONFIG_SCST_STRICT_SERIALIZING is not defined. \
     Pass-through dev handlers will not work.
    -#endif /* !defined(CONFIG_SCST_STRICT_SERIALIZING) */
    -#else  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
    -#warning Patch scst_exec_req_fifo- was not applied on \
    -your kernel. Pass-through dev handlers will not work.
    -#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
    -#endif /* !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
    +#endif
     
     /**
      ** SCST global variables. They are all uninitialized to have their layout in
    @@ -1607,26 +1602,18 @@ int __scst_register_dev_driver(struct scst_dev_type *dev_type,
     	if (res != 0)
     		goto out;
     
    -#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) && \
    +	!defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
    +	!defined(CONFIG_SCST_STRICT_SERIALIZING)
     	if (dev_type->exec == NULL) {
    -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
    -#if !defined(CONFIG_SCST_STRICT_SERIALIZING)
     		PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
     			"supported. Consider applying on your kernel patch "
     			"scst_exec_req_fifo- or define "
     			"CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
     		res = -EINVAL;
     		goto out;
    -#endif /* !defined(CONFIG_SCST_STRICT_SERIALIZING) */
    -#else  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
    -		PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
    -			"supported. Consider applying on your kernel patch "
    -			"scst_exec_req_fifo-", dev_type->name);
    -		res = -EINVAL;
    -		goto out;
    -#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
     	}
    -#endif /* !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
    +#endif
     
     #ifdef CONFIG_SCST_PROC
     	res = scst_suspend_activity(SCST_SUSPEND_TIMEOUT_USER);
    diff --git a/scst/src/scst_priv.h b/scst/src/scst_priv.h
    index 4f742c8a0..fce62d9af 100644
    --- a/scst/src/scst_priv.h
    +++ b/scst/src/scst_priv.h
    @@ -412,15 +412,6 @@ static inline int scst_exec_req(struct scsi_device *sdev,
     	    (void *)sgl, bufflen, nents, timeout, retries, privdata, done, gfp);
     #endif
     }
    -#else /* i.e. LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
    -#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
    -static inline int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
    -	void (*done)(void *data, char *sense, int result, int resid))
    -{
    -	WARN_ON_ONCE(1);
    -	return -1;
    -}
    -#endif
     #endif
     
     int scst_alloc_space(struct scst_cmd *cmd);
    diff --git a/scst/src/scst_targ.c b/scst/src/scst_targ.c
    index 689d979a2..8980b9b5e 100644
    --- a/scst/src/scst_targ.c
    +++ b/scst/src/scst_targ.c
    @@ -1778,14 +1778,16 @@ static inline enum scst_exec_context scst_optimize_post_exec_context(
      */
     void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid)
     {
    -	struct scst_cmd *cmd;
    +	struct scst_cmd *cmd = data;
     
     	TRACE_ENTRY();
     
    -	cmd = (struct scst_cmd *)data;
     	if (cmd == NULL)
     		goto out;
     
    +	TRACE_DBG("cmd %p; CDB[0/%d] %#x: result %d; resid %d", cmd,
    +		  cmd->cdb_len, cmd->cdb[0], result, resid);
    +
     	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
     
     	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
    @@ -2987,10 +2989,12 @@ static int scst_do_real_exec(struct scst_cmd *cmd)
     		sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
     	}
     
    -	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
    -
     	scsi_dev = dev->scsi_dev;
     
    +	TRACE_DBG("Sending cmd %p to SCSI mid-level dev %d:%d:%d:%lld", cmd,
    +		  scsi_dev->host->host_no, scsi_dev->channel, scsi_dev->id,
    +		  (u64)scsi_dev->lun);
    +
     	if (unlikely(scsi_dev == NULL)) {
     		PRINT_ERROR("Command for virtual device must be "
     			"processed by device handler (LUN %lld)!",