diff --git a/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt b/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt
index cee8e5026..a2549013a 100644
--- a/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt
+++ b/iscsi-scst/doc/SCST_Gentoo_HOWTO.txt
@@ -40,7 +40,6 @@ work.
 
 cd /usr/src/linux-2.6.39-gentoo-r3
 patch -p1 < /root/scst/iscsi-scst/kernel/patches/put_page_callback-2.6.39.patch
-patch -p1 < /root/scst/scst/kernel/scst_exec_req_fifo-2.6.39.patch
 
 make clean
diff --git a/iscsi-scst/doc/iscsi-scst-howto.txt b/iscsi-scst/doc/iscsi-scst-howto.txt
index 97271f74f..7b9b5fffc 100644
--- a/iscsi-scst/doc/iscsi-scst-howto.txt
+++ b/iscsi-scst/doc/iscsi-scst-howto.txt
@@ -21,7 +21,6 @@ the example below):
 cd /usr/src/kernels/linux-2.6.38.8
 patch -p1 < $HOME/scst/iscsi-scst/kernel/patches/put_page_callback-2.6.38.patch
-patch -p1 < $HOME/scst/scst/kernel/scst_exec_req_fifo-2.6.38.patch
 
 make clean
 
 Next, build and install the kernel:
diff --git a/qla2x00t/doc/qla2x00t-howto.html b/qla2x00t/doc/qla2x00t-howto.html
index 66b418723..4ed582fd4 100644
--- a/qla2x00t/doc/qla2x00t-howto.html
+++ b/qla2x00t/doc/qla2x00t-howto.html
@@ -80,13 +80,6 @@ Instructions for obtaining a distribution-specific kernel source tree vary. An e
 [root@proj src ]# tar xjf linux-source-`uname -r`.tar.bz2
-
 [root@proj src ]# ln -s linux-3.11 linux
-[root@proj src ]# cd linux
-[root@proj linux ]# patch -p1 < /root/scst/scst/kernel/scst_exec_req_fifo-3.11.patch
-
 [root@proj linux ]# pwd
 /usr/src/linux
diff --git a/scripts/generate-kernel-patch b/scripts/generate-kernel-patch
index 90d96eabd..dbcf623fe 100755
--- a/scripts/generate-kernel-patch
+++ b/scripts/generate-kernel-patch
@@ -282,6 +282,7 @@ for p in scst/kernel/*-${kver}.patch \
echo iscsi-scst/kernel/patches/*-${kver}.patch;
fi)
do
+ [ -e "$p" ] || continue
# Exclude the put_page_callback patch when command-line option -u has been
# specified since the current approach is not considered acceptable for
# upstream kernel inclusion. See also http://lkml.org/lkml/2008/12/11/213.
diff --git a/scripts/rebuild-rhel-kernel-rpm b/scripts/rebuild-rhel-kernel-rpm
index df8569a2f..5e92901df 100755
--- a/scripts/rebuild-rhel-kernel-rpm
+++ b/scripts/rebuild-rhel-kernel-rpm
@@ -220,11 +220,7 @@ cd SPECS
log "Copying SCST patches to the SOURCES directory"
cd ${rpmbuild_dir}/SOURCES
-copy_best_matching_patch $scst_dir/scst/kernel/rhel/scst_exec_req_fifo scst_exec_req_fifo.patch ||
-{
- echo "No matching scst_exec_req_fifo patch found for kernel version $kver";
- exit 1;
-}
+copy_best_matching_patch $scst_dir/scst/kernel/rhel/scst_exec_req_fifo scst_exec_req_fifo.patch
copy_best_matching_patch $scst_dir/iscsi-scst/kernel/patches/rhel/put_page_callback put_page_callback.patch ||
{
echo "No matching put_page_callback patch found for kernel version $kver";
@@ -300,7 +296,7 @@ diff -u SPECS/kernel.spec{.orig,}
Source82: config-s390x-debug
Source83: config-s390x-debug-rhel
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
+Patch201: put_page_callback.patch
+
# empty final patch file to facilitate testing of kernel patches
@@ -310,7 +306,7 @@ diff -u SPECS/kernel.spec{.orig,}
# Dynamically generate kernel .config files from config-* files
make -f %{SOURCE20} VERSION=%{version} configs
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
+ApplyPatch put_page_callback.patch
+
ApplyOptionalPatch linux-kernel-test.patch
@@ -339,7 +335,7 @@ diff -u SPECS/kernel.spec{.orig,}
Source82: config-generic
Source83: config-x86_64-debug-rhel
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
+Patch201: put_page_callback.patch
+
# empty final patch file to facilitate testing of kernel patches
@@ -349,7 +345,7 @@ diff -u SPECS/kernel.spec{.orig,}
# Dynamically generate kernel .config files from config-* files
make -f %{SOURCE20} VERSION=%{version} configs
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
+ApplyPatch put_page_callback.patch
+
ApplyOptionalPatch linux-kernel-test.patch
@@ -375,7 +371,7 @@ diff -u SPECS/kernel.spec{.orig,}
Source85: config-powerpc64-debug-rhel
Source86: config-s390x-debug-rhel
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
+Patch201: put_page_callback.patch
+
# empty final patch file to facilitate testing of kernel patches
@@ -385,7 +381,7 @@ diff -u SPECS/kernel.spec{.orig,}
# Dynamically generate kernel .config files from config-* files
make -f %{SOURCE20} VERSION=%{version} configs
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
+ApplyPatch put_page_callback.patch
+
ApplyOptionalPatch linux-kernel-test.patch
@@ -418,7 +414,7 @@ patch -p1 ${rpmbuild_dir}/SPECS/kernel.spec <<'EOF' || exit $?
Source2000: cpupower.service
Source2001: cpupower.config
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
+Patch201: put_page_callback.patch
+
# empty final patch to facilitate testing of kernel patches
@@ -428,7 +424,7 @@ patch -p1 ${rpmbuild_dir}/SPECS/kernel.spec <<'EOF' || exit $?
# Drop some necessary files from the source dir into the buildroot
cp $RPM_SOURCE_DIR/kernel-%{version}-*.config .
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
+ApplyPatch put_page_callback.patch
+
ApplyOptionalPatch linux-kernel-test.patch
diff --git a/scst/README b/scst/README
index ac19e55ce..b7eff48e0 100644
--- a/scst/README
+++ b/scst/README
@@ -70,27 +70,13 @@ following patches for the kernel in the "kernel" subdirectory. All of
them are optional, so, if you don't need the corresponding
functionality, you may not apply them.
-1. scst_exec_req_fifo-2.6.X.patch. This patch is necessary for
-pass-through dev handlers, because in the mainstream kernels
-scsi_do_req()/scsi_execute_async() work in LIFO order, instead of
-expected and required FIFO. So SCST needs new functions
-scsi_do_req_fifo() or scsi_execute_async_fifo() to be added in the
-kernel. This patch does that. You may not patch the kernel if you don't
-need the pass-through support. Alternatively, you can define
-CONFIG_SCST_STRICT_SERIALIZING compile option during the compilation
-(see description below). Unfortunately, the CONFIG_SCST_STRICT_SERIALIZING
-trick doesn't work on kernels starting from 2.6.30, because those
-kernels don't have the required functionality (scsi_execute_async())
-anymore. So, on them to have pass-through working you have to apply
-scst_exec_req_fifo-2.6.X.patch.
-
-2. readahead-2.6.X.patch. This patch fixes problem in Linux readahead
+1. readahead-2.6.X.patch. This patch fixes problem in Linux readahead
subsystem and greatly improves performance for software RAIDs. See
http://sourceforge.net/mailarchive/forum.php?thread_name=a0272b440906030714g67eabc5k8f847fb1e538cc62%40mail.gmail.com&forum_name=scst-devel
thread for more details. It is included in the mainstream kernels 2.6.33
and 2.6.32.11.
-3. readahead-context-2.6.X.patch. This is backported from 2.6.31 version
+2. readahead-context-2.6.X.patch. This is backported from 2.6.31 version
of the context readahead patch http://lkml.org/lkml/2009/4/12/9, big
thanks to Wu Fengguang. This is a performance improvement patch. It is
included in the mainstream kernel 2.6.31.
diff --git a/scst/include/scst.h b/scst/include/scst.h
index a79985bce..36207ed32 100644
--- a/scst/include/scst.h
+++ b/scst/include/scst.h
@@ -4856,7 +4856,7 @@ void scst_init_threads(struct scst_cmd_threads *cmd_threads);
void scst_deinit_threads(struct scst_cmd_threads *cmd_threads);
void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
void (*done)(void *data, char *sense, int result, int resid));
#endif
diff --git a/scst/kernel/rhel/scst_exec_req_fifo-2.6.32.patch b/scst/kernel/rhel/scst_exec_req_fifo-2.6.32.patch
deleted file mode 100644
index bcb3b02e2..000000000
--- a/scst/kernel/rhel/scst_exec_req_fifo-2.6.32.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -upkr linux-2.6.32/block/blk-map.c linux-2.6.32/block/blk-map.c
---- linux-2.6.32/block/blk-map.c 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/block/blk-map.c 2011-05-17 20:56:18.341812997 -0400
-@@ -5,6 +5,7 @@
- #include
- #include
- #include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res = 0;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.32/include/linux/blkdev.h linux-2.6.32/include/linux/blkdev.h
---- linux-2.6.32/include/linux/blkdev.h 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/include/linux/blkdev.h 2009-12-16 07:21:35.000000000 -0500
-@@ -708,6 +708,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -812,6 +814,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.32/include/linux/scatterlist.h linux-2.6.32/include/linux/scatterlist.h
---- linux-2.6.32/include/linux/scatterlist.h 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/include/linux/scatterlist.h 2009-12-16 07:21:35.000000000 -0500
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.32/lib/scatterlist.c linux-2.6.32/lib/scatterlist.c
---- linux-2.6.32/lib/scatterlist.c 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/lib/scatterlist.c 2009-12-16 07:21:35.000000000 -0500
-@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-121.patch b/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-121.patch
deleted file mode 120000
index 6a3acd053..000000000
--- a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-121.patch
+++ /dev/null
@@ -1 +0,0 @@
-../scst_exec_req_fifo-3.10.patch
\ No newline at end of file
diff --git a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-123.patch b/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-123.patch
deleted file mode 100644
index d60ddd1de..000000000
--- a/scst/kernel/rhel/scst_exec_req_fifo-3.10.0-123.patch
+++ /dev/null
@@ -1,524 +0,0 @@
-diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/block/blk-map.c ./block/blk-map.c
---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/block/blk-map.c 2014-07-16 20:25:31.000000000 +0200
-+++ ./block/blk-map.c 2014-08-07 09:09:11.751302961 +0200
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/blkdev.h ./include/linux/blkdev.h
---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/blkdev.h 2014-07-16 20:25:31.000000000 +0200
-+++ ./include/linux/blkdev.h 2014-08-07 09:09:11.751302961 +0200
-@@ -719,6 +719,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -838,6 +840,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/scatterlist.h ./include/linux/scatterlist.h
---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/scatterlist.h 2014-07-16 20:25:31.000000000 +0200
-+++ ./include/linux/scatterlist.h 2014-08-07 09:09:11.751302961 +0200
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -244,6 +245,9 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/lib/scatterlist.c ./lib/scatterlist.c
---- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/lib/scatterlist.c 2014-07-16 20:25:31.000000000 +0200
-+++ ./lib/scatterlist.c 2014-08-07 09:09:11.751302961 +0200
-@@ -628,3 +628,126 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.30.patch b/scst/kernel/scst_exec_req_fifo-2.6.30.patch
deleted file mode 100644
index 9b9cb78e1..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.30.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -upkr linux-2.6.30/block/blk-map.c linux-2.6.30/block/blk-map.c
---- linux-2.6.30/block/blk-map.c 2009-06-09 23:05:27.000000000 -0400
-+++ linux-2.6.30/block/blk-map.c 2011-05-17 21:03:29.661813000 -0400
-@@ -5,6 +5,7 @@
- #include
- #include
- #include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -272,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = rq->data = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = rq->data = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = 0;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.30/include/linux/blkdev.h linux-2.6.30/include/linux/blkdev.h
---- linux-2.6.30/include/linux/blkdev.h 2009-06-09 23:05:27.000000000 -0400
-+++ linux-2.6.30/include/linux/blkdev.h 2009-08-12 11:48:06.000000000 -0400
-@@ -704,6 +704,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -807,6 +809,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.30/include/linux/scatterlist.h linux-2.6.30/include/linux/scatterlist.h
---- linux-2.6.30/include/linux/scatterlist.h 2009-06-09 23:05:27.000000000 -0400
-+++ linux-2.6.30/include/linux/scatterlist.h 2009-08-12 11:50:02.000000000 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.30/lib/scatterlist.c linux-2.6.30/lib/scatterlist.c
---- linux-2.6.30/lib/scatterlist.c 2009-06-09 23:05:27.000000000 -0400
-+++ linux-2.6.30/lib/scatterlist.c 2009-08-12 11:56:04.000000000 -0400
-@@ -485,3 +485,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.31.patch b/scst/kernel/scst_exec_req_fifo-2.6.31.patch
deleted file mode 100644
index 0d9c06e6d..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.31.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -upkr linux-2.6.31/block/blk-map.c linux-2.6.31/block/blk-map.c
---- linux-2.6.31/block/blk-map.c 2009-09-09 18:13:59.000000000 -0400
-+++ linux-2.6.31/block/blk-map.c 2011-05-17 21:05:32.669812993 -0400
-@@ -5,6 +5,7 @@
- #include
- #include
- #include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.31/include/linux/blkdev.h linux-2.6.31/include/linux/blkdev.h
---- linux-2.6.31/include/linux/blkdev.h 2009-09-09 18:13:59.000000000 -0400
-+++ linux-2.6.31/include/linux/blkdev.h 2009-09-23 06:17:33.000000000 -0400
-@@ -699,6 +699,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -803,6 +805,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.31/include/linux/scatterlist.h linux-2.6.31/include/linux/scatterlist.h
---- linux-2.6.31/include/linux/scatterlist.h 2009-09-09 18:13:59.000000000 -0400
-+++ linux-2.6.31/include/linux/scatterlist.h 2009-09-23 06:17:33.000000000 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.31/lib/scatterlist.c linux-2.6.31/lib/scatterlist.c
---- linux-2.6.31/lib/scatterlist.c 2009-09-09 18:13:59.000000000 -0400
-+++ linux-2.6.31/lib/scatterlist.c 2009-09-23 06:17:33.000000000 -0400
-@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.32.patch b/scst/kernel/scst_exec_req_fifo-2.6.32.patch
deleted file mode 100644
index bc0171019..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.32.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -upkr linux-2.6.32/block/blk-map.c linux-2.6.32/block/blk-map.c
---- linux-2.6.32/block/blk-map.c 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/block/blk-map.c 2011-05-17 20:56:18.341812997 -0400
-@@ -5,6 +5,7 @@
- #include
- #include
- #include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.32/include/linux/blkdev.h linux-2.6.32/include/linux/blkdev.h
---- linux-2.6.32/include/linux/blkdev.h 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/include/linux/blkdev.h 2009-12-16 07:21:35.000000000 -0500
-@@ -708,6 +708,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -812,6 +814,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.32/include/linux/scatterlist.h linux-2.6.32/include/linux/scatterlist.h
---- linux-2.6.32/include/linux/scatterlist.h 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/include/linux/scatterlist.h 2009-12-16 07:21:35.000000000 -0500
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.32/lib/scatterlist.c linux-2.6.32/lib/scatterlist.c
---- linux-2.6.32/lib/scatterlist.c 2009-12-02 22:51:21.000000000 -0500
-+++ linux-2.6.32/lib/scatterlist.c 2009-12-16 07:21:35.000000000 -0500
-@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.33.patch b/scst/kernel/scst_exec_req_fifo-2.6.33.patch
deleted file mode 100644
index fee571fce..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.33.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -upkr linux-2.6.33/block/blk-map.c linux-2.6.33/block/blk-map.c
---- linux-2.6.33/block/blk-map.c 2010-02-24 13:52:17.000000000 -0500
-+++ linux-2.6.33/block/blk-map.c 2011-05-17 21:09:00.317812998 -0400
-@@ -5,6 +5,7 @@
- #include
- #include
- #include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.33/include/linux/blkdev.h linux-2.6.33/include/linux/blkdev.h
---- linux-2.6.33/include/linux/blkdev.h 2010-02-24 13:52:17.000000000 -0500
-+++ linux-2.6.33/include/linux/blkdev.h 2010-03-01 07:41:59.000000000 -0500
-@@ -710,6 +710,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -825,6 +827,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.33/include/linux/scatterlist.h linux-2.6.33/include/linux/scatterlist.h
---- linux-2.6.33/include/linux/scatterlist.h 2010-02-24 13:52:17.000000000 -0500
-+++ linux-2.6.33/include/linux/scatterlist.h 2010-03-01 07:41:59.000000000 -0500
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.33/lib/scatterlist.c linux-2.6.33/lib/scatterlist.c
---- linux-2.6.33/lib/scatterlist.c 2010-02-24 13:52:17.000000000 -0500
-+++ linux-2.6.33/lib/scatterlist.c 2010-03-01 07:41:59.000000000 -0500
-@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.34.patch b/scst/kernel/scst_exec_req_fifo-2.6.34.patch
deleted file mode 100644
index c7021f573..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.34.patch
+++ /dev/null
@@ -1,530 +0,0 @@
-diff -upkr linux-2.6.34/block/blk-map.c linux-2.6.34/block/blk-map.c
---- linux-2.6.34/block/blk-map.c 2010-05-16 17:17:36.000000000 -0400
-+++ linux-2.6.34/block/blk-map.c 2011-05-17 21:10:43.745812995 -0400
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.34/include/linux/blkdev.h linux-2.6.34/include/linux/blkdev.h
---- linux-2.6.34/include/linux/blkdev.h 2010-05-16 17:17:36.000000000 -0400
-+++ linux-2.6.34/include/linux/blkdev.h 2010-05-24 06:51:22.000000000 -0400
-@@ -713,6 +713,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -828,6 +830,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.34/include/linux/scatterlist.h linux-2.6.34/include/linux/scatterlist.h
---- linux-2.6.34/include/linux/scatterlist.h 2010-05-16 17:17:36.000000000 -0400
-+++ linux-2.6.34/include/linux/scatterlist.h 2010-05-24 06:51:22.000000000 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.34/lib/scatterlist.c linux-2.6.34/lib/scatterlist.c
---- linux-2.6.34/lib/scatterlist.c 2010-05-16 17:17:36.000000000 -0400
-+++ linux-2.6.34/lib/scatterlist.c 2010-05-24 06:51:22.000000000 -0400
-@@ -494,3 +494,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.35.patch b/scst/kernel/scst_exec_req_fifo-2.6.35.patch
deleted file mode 100644
index b10ae1b5a..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.35.patch
+++ /dev/null
@@ -1,530 +0,0 @@
-diff -upkr linux-2.6.35/block/blk-map.c linux-2.6.35/block/blk-map.c
---- linux-2.6.35/block/blk-map.c 2010-08-01 18:11:14.000000000 -0400
-+++ linux-2.6.35/block/blk-map.c 2011-05-17 21:12:23.125813000 -0400
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= 1 << BIO_RW;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.35/include/linux/blkdev.h linux-2.6.35/include/linux/blkdev.h
---- linux-2.6.35/include/linux/blkdev.h 2010-08-01 18:11:14.000000000 -0400
-+++ linux-2.6.35/include/linux/blkdev.h 2010-08-04 04:21:59.737128732 -0400
-@@ -717,6 +717,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -832,6 +834,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.35/include/linux/scatterlist.h linux-2.6.35/include/linux/scatterlist.h
---- linux-2.6.35/include/linux/scatterlist.h 2010-08-01 18:11:14.000000000 -0400
-+++ linux-2.6.35/include/linux/scatterlist.h 2010-08-04 04:21:59.741129485 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.35/lib/scatterlist.c linux-2.6.35/lib/scatterlist.c
---- linux-2.6.35/lib/scatterlist.c 2010-08-01 18:11:14.000000000 -0400
-+++ linux-2.6.35/lib/scatterlist.c 2010-08-04 04:21:59.741129485 -0400
-@@ -494,3 +494,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.36.patch b/scst/kernel/scst_exec_req_fifo-2.6.36.patch
deleted file mode 100644
index d90bdcb8e..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.36.patch
+++ /dev/null
@@ -1,532 +0,0 @@
-diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
---- linux-2.6.36/block/blk-map.c 2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36/block/blk-map.c 2011-05-17 21:13:42.301812997 -0400
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -271,6 +273,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.36/include/linux/blkdev.h linux-2.6.36/include/linux/blkdev.h
---- linux-2.6.36/include/linux/blkdev.h 2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36/include/linux/blkdev.h 2010-10-26 04:00:15.899759399 -0400
-@@ -629,6 +629,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.36/include/linux/scatterlist.h linux-2.6.36/include/linux/scatterlist.h
---- linux-2.6.36/include/linux/scatterlist.h 2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36/include/linux/scatterlist.h 2010-10-26 04:00:15.899759399 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.36/lib/scatterlist.c linux-2.6.36/lib/scatterlist.c
---- linux-2.6.36/lib/scatterlist.c 2010-10-20 16:30:22.000000000 -0400
-+++ linux-2.6.36/lib/scatterlist.c 2010-10-26 04:00:15.899759399 -0400
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.37.patch b/scst/kernel/scst_exec_req_fifo-2.6.37.patch
deleted file mode 100644
index ab94c18ae..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.37.patch
+++ /dev/null
@@ -1,532 +0,0 @@
-diff -upkr linux-2.6.37/block/blk-map.c linux-2.6.37/block/blk-map.c
---- linux-2.6.37/block/blk-map.c 2011-01-04 19:50:19.000000000 -0500
-+++ linux-2.6.37/block/blk-map.c 2011-05-17 21:15:14.329812999 -0400
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.37/include/linux/blkdev.h linux-2.6.37/include/linux/blkdev.h
---- linux-2.6.37/include/linux/blkdev.h 2011-01-04 19:50:19.000000000 -0500
-+++ linux-2.6.37/include/linux/blkdev.h 2011-01-08 08:45:54.350430208 -0500
-@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.37/include/linux/scatterlist.h linux-2.6.37/include/linux/scatterlist.h
---- linux-2.6.37/include/linux/scatterlist.h 2011-01-04 19:50:19.000000000 -0500
-+++ linux-2.6.37/include/linux/scatterlist.h 2011-01-08 08:45:54.354431761 -0500
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.37/lib/scatterlist.c linux-2.6.37/lib/scatterlist.c
---- linux-2.6.37/lib/scatterlist.c 2011-01-04 19:50:19.000000000 -0500
-+++ linux-2.6.37/lib/scatterlist.c 2011-01-08 08:45:54.401930472 -0500
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.38.patch b/scst/kernel/scst_exec_req_fifo-2.6.38.patch
deleted file mode 100644
index 4561ba2e9..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.38.patch
+++ /dev/null
@@ -1,532 +0,0 @@
-diff -upkr linux-2.6.38/block/blk-map.c linux-2.6.38/block/blk-map.c
---- linux-2.6.38/block/blk-map.c 2011-03-14 21:20:32.000000000 -0400
-+++ linux-2.6.38/block/blk-map.c 2011-05-11 22:07:37.589813000 -0400
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.38/include/linux/blkdev.h linux-2.6.38/include/linux/blkdev.h
---- linux-2.6.38/include/linux/blkdev.h 2011-03-14 21:20:32.000000000 -0400
-+++ linux-2.6.38/include/linux/blkdev.h 2011-03-18 10:19:00.000000000 -0400
-@@ -593,6 +593,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.38/include/linux/scatterlist.h linux-2.6.38/include/linux/scatterlist.h
---- linux-2.6.38/include/linux/scatterlist.h 2011-03-14 21:20:32.000000000 -0400
-+++ linux-2.6.38/include/linux/scatterlist.h 2011-03-18 10:19:00.000000000 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.38/lib/scatterlist.c linux-2.6.38/lib/scatterlist.c
---- linux-2.6.38/lib/scatterlist.c 2011-03-14 21:20:32.000000000 -0400
-+++ linux-2.6.38/lib/scatterlist.c 2011-03-18 10:46:41.000000000 -0400
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.39.patch b/scst/kernel/scst_exec_req_fifo-2.6.39.patch
deleted file mode 100644
index 7ecca2958..000000000
--- a/scst/kernel/scst_exec_req_fifo-2.6.39.patch
+++ /dev/null
@@ -1,532 +0,0 @@
-diff -upkr linux-2.6.39/block/blk-map.c linux-2.6.39/block/blk-map.c
---- linux-2.6.39/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
-+++ linux-2.6.39/block/blk-map.c 2011-05-19 10:49:02.753812997 -0400
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-2.6.39/include/linux/blkdev.h linux-2.6.39/include/linux/blkdev.h
---- linux-2.6.39/include/linux/blkdev.h 2011-05-19 00:06:34.000000000 -0400
-+++ linux-2.6.39/include/linux/blkdev.h 2011-05-19 10:49:02.753812997 -0400
-@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -707,6 +709,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.39/include/linux/scatterlist.h linux-2.6.39/include/linux/scatterlist.h
---- linux-2.6.39/include/linux/scatterlist.h 2011-05-19 00:06:34.000000000 -0400
-+++ linux-2.6.39/include/linux/scatterlist.h 2011-05-19 10:49:02.753812997 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.39/lib/scatterlist.c linux-2.6.39/lib/scatterlist.c
---- linux-2.6.39/lib/scatterlist.c 2011-05-19 00:06:34.000000000 -0400
-+++ linux-2.6.39/lib/scatterlist.c 2011-05-19 10:49:02.753812997 -0400
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
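
For reference, a minimal usage sketch of the sg_copy() helper exported by the
pre-3.4 patches above (the variable names and calling context are illustrative
only; the KM_USER0/KM_USER1 slots mirror what blk_rq_copy_kern_sg() itself
passes for WRITE requests):

    /* Copy every byte of src_sgl into dst_sgl from process context. */
    int copied;

    copied = sg_copy(dst_sgl, src_sgl,
                     0,            /* nents_to_copy == 0: no entry limit */
                     0,            /* copy_len == 0: copy everything */
                     KM_USER0, KM_USER1);
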
diff --git a/scst/kernel/scst_exec_req_fifo-3.0.patch b/scst/kernel/scst_exec_req_fifo-3.0.patch
deleted file mode 100644
index 998f4a32c..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.0.patch
+++ /dev/null
@@ -1,532 +0,0 @@
-diff -upkr linux-3.0.0-orig/block/blk-map.c linux-3.0.0-scst-dbg/block/blk-map.c
---- linux-3.0.0-orig/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
-+++ linux-3.0.0-scst-dbg/block/blk-map.c 2011-07-22 19:40:27.131230804 -0400
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h>		/* for struct sg_iovec */
-
- #include "blk.h"
-@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff -upkr linux-3.0.0-orig/include/linux/blkdev.h linux-3.0.0-scst-dbg/include/linux/blkdev.h
---- linux-3.0.0-orig/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
-+++ linux-3.0.0-scst-dbg/include/linux/blkdev.h 2011-07-22 19:24:27.803231156 -0400
-@@ -594,6 +594,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-3.0.0-orig/include/linux/scatterlist.h linux-3.0.0-scst-dbg/include/linux/scatterlist.h
---- linux-3.0.0-orig/include/linux/scatterlist.h 2011-07-21 22:17:23.000000000 -0400
-+++ linux-3.0.0-scst-dbg/include/linux/scatterlist.h 2011-07-22 19:24:27.803231156 -0400
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff -upkr linux-3.0.0-orig/lib/scatterlist.c linux-3.0.0-scst-dbg/lib/scatterlist.c
---- linux-3.0.0-orig/lib/scatterlist.c 2011-07-21 22:17:23.000000000 -0400
-+++ linux-3.0.0-scst-dbg/lib/scatterlist.c 2011-07-22 19:40:27.131230804 -0400
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
diff --git a/scst/kernel/scst_exec_req_fifo-3.1.patch b/scst/kernel/scst_exec_req_fifo-3.1.patch
deleted file mode 100644
index d6bb5346b..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.1.patch
+++ /dev/null
@@ -1,536 +0,0 @@
-=== modified file 'linux-3.1-scst/block/blk-map.c'
---- linux-3.1-orig/block/blk-map.c 2011-10-26 20:34:50 +0000
-+++ linux-3.1-scst/block/blk-map.c 2011-10-26 20:58:56 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h>		/* for struct sg_iovec */
-
- #include "blk.h"
-@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'linux-3.1-scst/include/linux/blkdev.h'
---- linux-3.1-orig/include/linux/blkdev.h 2011-10-26 20:34:50 +0000
-+++ linux-3.1-scst/include/linux/blkdev.h 2011-10-26 20:58:56 +0000
-@@ -599,6 +599,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -714,6 +716,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'linux-3.1-scst/include/linux/scatterlist.h'
---- linux-3.1-orig/include/linux/scatterlist.h 2011-10-26 20:34:50 +0000
-+++ linux-3.1-scst/include/linux/scatterlist.h 2011-10-26 20:58:56 +0000
-@@ -3,6 +3,7 @@
-
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'linux-3.1-scst/lib/scatterlist.c'
---- linux-3.1-orig/lib/scatterlist.c 2011-10-26 20:34:50 +0000
-+++ linux-3.1-scst/lib/scatterlist.c 2011-10-26 20:58:56 +0000
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
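
Note on the split between the patch variants: the patches above declare
sg_copy() with explicit kmap_atomic slot arguments, while the 3.10 and later
patches below drop them, matching the removal of the km_type argument from
kmap_atomic()/kunmap_atomic() in later kernels. Both prototypes, as declared
in the respective scatterlist.h hunks:

    /* Variant in the patches above (slot-based kmap_atomic): */
    int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
                int nents_to_copy, size_t copy_len,
                enum km_type d_km_type, enum km_type s_km_type);

    /* Variant in the 3.10+ patches below (no km_type arguments): */
    int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
                int nents_to_copy, size_t copy_len);
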
diff --git a/scst/kernel/scst_exec_req_fifo-3.10.patch b/scst/kernel/scst_exec_req_fifo-3.10.patch
deleted file mode 100644
index 69fce3a5f..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.10.patch
+++ /dev/null
@@ -1,527 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2013-07-23 02:45:53 +0000
-+++ new/block/blk-map.c 2013-07-23 02:50:11 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h>		/* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2013-07-23 02:45:53 +0000
-+++ new/include/linux/blkdev.h 2013-07-23 02:50:11 +0000
-@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2013-07-23 02:45:53 +0000
-+++ new/include/linux/scatterlist.h 2013-07-23 02:50:11 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -244,6 +245,9 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2013-07-23 02:45:53 +0000
-+++ new/lib/scatterlist.c 2013-07-23 02:50:11 +0000
-@@ -627,3 +627,126 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
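
As a rough illustration of the interface the blk-map.c hunks add, here is a
hypothetical caller sketch (not code from the patches; q, sgl and nents are
assumed to exist, and error handling is abbreviated). Per the kernel-doc
above, blk_rq_unmap_kern_sg(rq, err) is intended only for error paths after a
successful mapping.

    /* Map a kernel scatterlist onto a BLOCK_PC request. */
    struct request *rq;
    int res;

    rq = blk_get_request(q, READ, GFP_KERNEL);
    if (rq == NULL)
        return -ENOMEM;

    res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
    if (res != 0) {
        /* Mapping failed; drop the request again. */
        blk_put_request(rq);
        return res;
    }
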
diff --git a/scst/kernel/scst_exec_req_fifo-3.11.patch b/scst/kernel/scst_exec_req_fifo-3.11.patch
deleted file mode 100644
index 63b2da453..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.11.patch
+++ /dev/null
@@ -1,528 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2013-09-28 00:14:38 +0000
-+++ new/block/blk-map.c 2013-09-28 00:23:26 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h>		/* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2013-09-28 00:14:38 +0000
-+++ new/include/linux/blkdev.h 2013-09-28 00:23:26 +0000
-@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2013-09-28 00:14:38 +0000
-+++ new/include/linux/scatterlist.h 2013-09-28 00:23:26 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2013-09-28 00:14:38 +0000
-+++ new/lib/scatterlist.c 2013-09-28 00:23:26 +0000
-@@ -716,3 +716,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
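
A sketch of how out-of-tree code might detect whether a kernel carries one of
these patches, presumably the reason the blkdev.h hunks define the
SCSI_EXEC_REQ_FIFO_DEFINED marker (hypothetical consumer code, not part of the
patches):

    #include <linux/blkdev.h>

    #ifdef SCSI_EXEC_REQ_FIFO_DEFINED
        /* Patched kernel: the scatterlist can be mapped onto the request
         * directly. */
        res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
    #else
        /* Unpatched kernel: fall back to a linear buffer and
         * blk_rq_map_kern(). */
    #endif
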
diff --git a/scst/kernel/scst_exec_req_fifo-3.12.patch b/scst/kernel/scst_exec_req_fifo-3.12.patch
deleted file mode 100644
index b08d43f3e..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.12.patch
+++ /dev/null
@@ -1,528 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2013-11-30 00:34:22 +0000
-+++ new/block/blk-map.c 2013-11-30 00:39:53 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h>		/* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2013-11-30 00:34:22 +0000
-+++ new/include/linux/blkdev.h 2013-11-30 00:39:53 +0000
-@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2013-11-30 00:34:22 +0000
-+++ new/include/linux/scatterlist.h 2013-11-30 00:39:53 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2013-11-30 00:34:22 +0000
-+++ new/lib/scatterlist.c 2013-11-30 00:39:53 +0000
-@@ -717,3 +717,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.13.patch b/scst/kernel/scst_exec_req_fifo-3.13.patch
deleted file mode 100644
index 84980e46a..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.13.patch
+++ /dev/null
@@ -1,528 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2014-01-30 00:25:53 +0000
-+++ new/block/blk-map.c 2014-01-30 00:44:50 +0000
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2014-01-30 00:25:53 +0000
-+++ new/include/linux/blkdev.h 2014-01-30 00:44:50 +0000
-@@ -712,6 +712,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -831,6 +833,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2014-01-30 00:25:53 +0000
-+++ new/include/linux/scatterlist.h 2014-01-30 00:44:50 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2014-01-30 00:25:53 +0000
-+++ new/lib/scatterlist.c 2014-01-30 00:44:50 +0000
-@@ -717,3 +717,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.14.patch b/scst/kernel/scst_exec_req_fifo-3.14.patch
deleted file mode 100644
index 70c47797d..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.14.patch
+++ /dev/null
@@ -1,528 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2014-04-17 22:02:06 +0000
-+++ new/block/blk-map.c 2014-04-17 22:08:48 +0000
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2014-04-17 22:02:06 +0000
-+++ new/include/linux/blkdev.h 2014-04-17 22:08:48 +0000
-@@ -705,6 +705,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -825,6 +827,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2014-04-17 22:02:06 +0000
-+++ new/include/linux/scatterlist.h 2014-04-17 22:08:48 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2014-04-17 22:02:06 +0000
-+++ new/lib/scatterlist.c 2014-04-17 22:08:48 +0000
-@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.15.patch b/scst/kernel/scst_exec_req_fifo-3.15.patch
deleted file mode 100644
index 665cc2606..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.15.patch
+++ /dev/null
@@ -1,528 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2014-06-18 01:32:48 +0000
-+++ new/block/blk-map.c 2014-06-18 01:40:34 +0000
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2014-06-18 01:32:48 +0000
-+++ new/include/linux/blkdev.h 2014-06-18 01:40:34 +0000
-@@ -717,6 +717,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -837,6 +839,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct sg_iovec *,
- int, unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2014-06-18 01:32:48 +0000
-+++ new/include/linux/scatterlist.h 2014-06-18 01:40:34 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2014-06-18 01:32:48 +0000
-+++ new/lib/scatterlist.c 2014-06-18 01:40:34 +0000
-@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.16.patch b/scst/kernel/scst_exec_req_fifo-3.16.patch
deleted file mode 100644
index a08921920..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.16.patch
+++ /dev/null
@@ -1,524 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2014-08-19 01:00:36 +0000
-+++ new/block/blk-map.c 2014-08-19 01:37:01 +0000
-@@ -5,6 +5,8 @@
- #include
- #include
- #include
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2014-08-19 01:00:36 +0000
-+++ new/include/linux/blkdev.h 2014-08-19 01:06:48 +0000
-@@ -735,6 +735,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -856,6 +858,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct sg_iovec *,
- int, unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2014-08-19 01:00:36 +0000
-+++ new/include/linux/scatterlist.h 2014-08-19 01:06:48 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2014-08-19 01:00:36 +0000
-+++ new/lib/scatterlist.c 2014-08-19 01:06:48 +0000
-@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.17.patch b/scst/kernel/scst_exec_req_fifo-3.17.patch
deleted file mode 100644
index 46215abb0..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.17.patch
+++ /dev/null
@@ -1,524 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2014-11-21 03:17:49 +0000
-+++ new/block/blk-map.c 2014-11-21 03:43:00 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2014-11-21 03:17:49 +0000
-+++ new/include/linux/blkdev.h 2014-11-21 03:43:00 +0000
-@@ -737,6 +737,8 @@ extern unsigned long blk_max_low_pfn, bl
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -858,6 +860,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct sg_iovec *,
- int, unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2014-11-21 03:17:49 +0000
-+++ new/include/linux/scatterlist.h 2014-11-21 03:43:00 +0000
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2014-11-21 03:17:49 +0000
-+++ new/lib/scatterlist.c 2014-11-21 03:43:00 +0000
-@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatter
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.18.patch b/scst/kernel/scst_exec_req_fifo-3.18.patch
deleted file mode 100644
index e64a14fde..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.18.patch
+++ /dev/null
@@ -1,536 +0,0 @@
-Subject: [PATCH] scst_exec_req_fifo
-
----
- block/blk-map.c | 329 ++++++++++++++++++++++++++++++++++++++++++++
- include/linux/blkdev.h | 5 +
- include/linux/scatterlist.h | 4 +
- lib/scatterlist.c | 124 +++++++++++++++++
- 4 files changed, 462 insertions(+)
-
-diff --git a/block/blk-map.c b/block/blk-map.c
-index f890d43..d4b8509 100644
---- a/block/blk-map.c
-+++ b/block/blk-map.c
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include
-+#include
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the bios in processing and, if zero, free */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit amount of copied data to to_copy, because
-+ * sgl might have the last element in sgl not marked as last in
-+ * SG chaining.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on DMA boundary and
-+ * not on stack. The last one may have unaligned
-+ * length as long as the total length is aligned to
-+ * DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index aac0f9e..5cd3afa 100644
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -731,6 +731,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
- #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
- #define BLK_MIN_SG_TIMEOUT (7 * HZ)
-
-+#define SCSI_EXEC_REQ_FIFO_DEFINED
-+
- #ifdef CONFIG_BOUNCE
- extern int init_emergency_isa_pool(void);
- extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-@@ -852,6 +854,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct sg_iovec *,
- int, unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
-index ed8f9e7..f64e02f 100644
---- a/include/linux/scatterlist.h
-+++ b/include/linux/scatterlist.h
-@@ -8,6 +8,7 @@
- #include
- #include
- #include
-+#include
-
- struct sg_table {
- struct scatterlist *sgl; /* the list */
-@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
- size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-diff --git a/lib/scatterlist.c b/lib/scatterlist.c
-index c9f2e8c..ba693d1 100644
---- a/lib/scatterlist.c
-+++ b/lib/scatterlist.c
-@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
- }
- EXPORT_SYMBOL(sg_pcopy_to_buffer);
-+
-+
-+/*
-+ * Can switch to the next dst_sg element, so, to copy to strictly only
-+ * one dst_sg element, it must be either last in the chain, or
-+ * copy_len == dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr);
-+ kunmap_atomic(daddr);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
---
-2.1.2
-
diff --git a/scst/kernel/scst_exec_req_fifo-3.2.patch b/scst/kernel/scst_exec_req_fifo-3.2.patch
deleted file mode 100644
index 2b8257ce3..000000000
--- a/scst/kernel/scst_exec_req_fifo-3.2.patch
+++ /dev/null
@@ -1,536 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2012-01-10 22:58:17 +0000
-+++ new/block/blk-map.c 2012-01-10 23:01:21 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>