Mirror of https://github.com/SCST-project/scst.git
scst: Rework SCSI pass-through support for kernel versions >= 2.6.30
Changes in this patch:

- Rework the SCSI pass-through code such that for kernel versions >= 2.6.30
  the scst_exec_req_fifo patch is no longer needed.
- Modify the pass-through code such that blk_rq_append_bio() is only called
  for kernel version 2.6.30. For later kernel versions blk_make_request() is
  called instead.
- Rework scst_scsi_exec_async().
- Add debug tracing of SCSI pass-through result status.
- Add a lockdep_assert_held() call in scsi_end_async().

git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@5979 d57e44dd-8a1f-0410-8b47-8ef2f437770f
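For orientation, below is a minimal, hypothetical C sketch of the kernel-version split the commit message describes. It is not SCST source: pt_make_request() and its error handling are illustrative assumptions; only blk_rq_append_bio(), blk_make_request() and the version boundary come from the message.

/*
 * Sketch only: on 2.6.30 the bio chain is appended to a separately
 * obtained request; later kernels provide blk_make_request(), which
 * allocates the request and attaches the bios in one step.
 */
#include <linux/version.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct request *pt_make_request(struct request_queue *q,
					struct bio *bio, gfp_t gfp)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 30)
	struct request *rq = blk_make_request(q, bio, gfp);

	/* blk_make_request() reports failure via ERR_PTR() */
	return IS_ERR(rq) ? NULL : rq;
#else
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp);

	if (rq != NULL && blk_rq_append_bio(q, rq, bio) != 0) {
		blk_put_request(rq);
		rq = NULL;
	}
	return rq;
#endif
}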
@@ -40,7 +40,6 @@ work.
 
 cd /usr/src/linux-2.6.39-gentoo-r3
 patch -p1 < /root/scst/iscsi-scst/kernel/patches/put_page_callback-2.6.39.patch
-patch -p1 < /root/scst/scst/kernel/scst_exec_req_fifo-2.6.39.patch
 make clean
 
 
@@ -21,7 +21,6 @@ the example below):
 
 cd /usr/src/kernels/linux-2.6.38.8
 patch -p1 < $HOME/scst/iscsi-scst/kernel/patches/put_page_callback-2.6.38.patch
-patch -p1 < $HOME/scst/scst/kernel/scst_exec_req_fifo-2.6.38.patch
 make clean
 
 Next, build and install the kernel:
 
@@ -80,13 +80,6 @@ Instructions for obtaining a distribution-specific kernel source tree vary. An e
 [root@proj src ]# tar xjf linux-source-`uname -r`.tar.bz2</pre>
 </li>
 
-<li>
-Patch the kernel that has just been downloaded:
-<pre>[root@proj src ]# ln -s linux-3.11 linux
-[root@proj src ]# cd linux
-[root@proj linux ]# patch -p1 < /root/scst/scst/kernel/scst_exec_req_fifo-3.11.patch</pre>
-</li>
-
 <li>The next step is to configure the kernel:
 <pre>[root@proj linux ]# pwd
 /usr/src/linux
 
@@ -282,6 +282,7 @@ for p in scst/kernel/*-${kver}.patch \
 echo iscsi-scst/kernel/patches/*-${kver}.patch;
 fi)
 do
+[ -e "$p" ] || continue
 # Exclude the put_page_callback patch when command-line option -u has been
 # specified since the current approach is not considered acceptable for
 # upstream kernel inclusion. See also http://lkml.org/lkml/2008/12/11/213.
 
@@ -220,11 +220,7 @@ cd SPECS
 log "Copying SCST patches to the SOURCES directory"
 
 cd ${rpmbuild_dir}/SOURCES
-copy_best_matching_patch $scst_dir/scst/kernel/rhel/scst_exec_req_fifo scst_exec_req_fifo.patch ||
-{
-  echo "No matching scst_exec_req_fifo patch found for kernel version $kver";
-  exit 1;
-}
+copy_best_matching_patch $scst_dir/scst/kernel/rhel/scst_exec_req_fifo scst_exec_req_fifo.patch
 copy_best_matching_patch $scst_dir/iscsi-scst/kernel/patches/rhel/put_page_callback put_page_callback.patch ||
 {
   echo "No matching put_page_callback patch found for kernel version $kver";
@@ -300,7 +296,7 @@ diff -u SPECS/kernel.spec{.orig,}
 Source82: config-s390x-debug
 Source83: config-s390x-debug-rhel
 
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
 +Patch201: put_page_callback.patch
 +
 # empty final patch file to facilitate testing of kernel patches
@@ -310,7 +306,7 @@ diff -u SPECS/kernel.spec{.orig,}
 # Dynamically generate kernel .config files from config-* files
 make -f %{SOURCE20} VERSION=%{version} configs
 
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
 +ApplyPatch put_page_callback.patch
 +
 ApplyOptionalPatch linux-kernel-test.patch
@@ -339,7 +335,7 @@ diff -u SPECS/kernel.spec{.orig,}
 Source82: config-generic
 Source83: config-x86_64-debug-rhel
 
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
 +Patch201: put_page_callback.patch
 +
 # empty final patch file to facilitate testing of kernel patches
@@ -349,7 +345,7 @@ diff -u SPECS/kernel.spec{.orig,}
 # Dynamically generate kernel .config files from config-* files
 make -f %{SOURCE20} VERSION=%{version} configs
 
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
 +ApplyPatch put_page_callback.patch
 +
 ApplyOptionalPatch linux-kernel-test.patch
@@ -375,7 +371,7 @@ diff -u SPECS/kernel.spec{.orig,}
 Source85: config-powerpc64-debug-rhel
 Source86: config-s390x-debug-rhel
 
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
 +Patch201: put_page_callback.patch
 +
 # empty final patch file to facilitate testing of kernel patches
@@ -385,7 +381,7 @@ diff -u SPECS/kernel.spec{.orig,}
 # Dynamically generate kernel .config files from config-* files
 make -f %{SOURCE20} VERSION=%{version} configs
 
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
 +ApplyPatch put_page_callback.patch
 +
 ApplyOptionalPatch linux-kernel-test.patch
@@ -418,7 +414,7 @@ patch -p1 ${rpmbuild_dir}/SPECS/kernel.spec <<'EOF' || exit $?
 Source2000: cpupower.service
 Source2001: cpupower.config
 
-+Patch200: scst_exec_req_fifo.patch
++#Patch200: scst_exec_req_fifo.patch
 +Patch201: put_page_callback.patch
 +
 # empty final patch to facilitate testing of kernel patches
@@ -428,7 +424,7 @@ patch -p1 ${rpmbuild_dir}/SPECS/kernel.spec <<'EOF' || exit $?
 # Drop some necessary files from the source dir into the buildroot
 cp $RPM_SOURCE_DIR/kernel-%{version}-*.config .
 
-+ApplyPatch scst_exec_req_fifo.patch
++#ApplyPatch scst_exec_req_fifo.patch
 +ApplyPatch put_page_callback.patch
 +
 ApplyOptionalPatch linux-kernel-test.patch
 
scst/README
@@ -70,27 +70,13 @@ following patches for the kernel in the "kernel" subdirectory. All of
 them are optional, so, if you don't need the corresponding
 functionality, you may not apply them.
 
-1. scst_exec_req_fifo-2.6.X.patch. This patch is necessary for
-pass-through dev handlers, because in the mainstream kernels
-scsi_do_req()/scsi_execute_async() work in LIFO order, instead of
-expected and required FIFO. So SCST needs new functions
-scsi_do_req_fifo() or scsi_execute_async_fifo() to be added in the
-kernel. This patch does that. You may not patch the kernel if you don't
-need the pass-through support. Alternatively, you can define
-CONFIG_SCST_STRICT_SERIALIZING compile option during the compilation
-(see description below). Unfortunately, the CONFIG_SCST_STRICT_SERIALIZING
-trick doesn't work on kernels starting from 2.6.30, because those
-kernels don't have the required functionality (scsi_execute_async())
-anymore. So, on them to have pass-through working you have to apply
-scst_exec_req_fifo-2.6.X.patch.
-
-2. readahead-2.6.X.patch. This patch fixes problem in Linux readahead
+1. readahead-2.6.X.patch. This patch fixes problem in Linux readahead
 subsystem and greatly improves performance for software RAIDs. See
 http://sourceforge.net/mailarchive/forum.php?thread_name=a0272b440906030714g67eabc5k8f847fb1e538cc62%40mail.gmail.com&forum_name=scst-devel
 thread for more details. It is included in the mainstream kernels 2.6.33
 and 2.6.32.11.
 
-3. readahead-context-2.6.X.patch. This is backported from 2.6.31 version
+2. readahead-context-2.6.X.patch. This is backported from 2.6.31 version
 of the context readahead patch http://lkml.org/lkml/2009/4/12/9, big
 thanks to Wu Fengguang. This is a performance improvement patch. It is
 included in the mainstream kernel 2.6.31.
 
@@ -4856,7 +4856,7 @@ void scst_init_threads(struct scst_cmd_threads *cmd_threads);
 void scst_deinit_threads(struct scst_cmd_threads *cmd_threads);
 
 void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
 int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
 	void (*done)(void *data, char *sense, int result, int resid));
 #endif
 
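A hedged sketch of how a caller might use the interface declared above. Only scst_scsi_exec_async() and the done() signature come from the header; my_exec() and passing the command itself as the completion cookie are assumptions for illustration.

/*
 * Illustrative only: submit a pass-through command and let
 * scst_pass_through_cmd_done() (which matches the done() signature
 * declared above) complete it.
 */
static int my_exec(struct scst_cmd *cmd)
{
	int rc;

	rc = scst_scsi_exec_async(cmd, cmd, scst_pass_through_cmd_done);
	if (rc != 0)
		pr_err("scst_scsi_exec_async() failed: %d\n", rc);
	return rc;
}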
@@ -1,529 +0,0 @@
diff -upkr linux-2.6.32/block/blk-map.c linux-2.6.32/block/blk-map.c
--- linux-2.6.32/block/blk-map.c	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/block/blk-map.c	2011-05-17 20:56:18.341812997 -0400
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 #include <scsi/sg.h>	/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			int nents, struct blk_kern_sg_work **pbw,
+			gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res = 0;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= 1 << BIO_RW;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
diff -upkr linux-2.6.32/include/linux/blkdev.h linux-2.6.32/include/linux/blkdev.h
--- linux-2.6.32/include/linux/blkdev.h	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/include/linux/blkdev.h	2009-12-16 07:21:35.000000000 -0500
@@ -708,6 +708,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -812,6 +814,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, struct sg_iovec *, int,
		unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
		struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.32/include/linux/scatterlist.h linux-2.6.32/include/linux/scatterlist.h
--- linux-2.6.32/include/linux/scatterlist.h	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/include/linux/scatterlist.h	2009-12-16 07:21:35.000000000 -0500
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.32/lib/scatterlist.c linux-2.6.32/lib/scatterlist.c
--- linux-2.6.32/lib/scatterlist.c	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/lib/scatterlist.c	2009-12-16 07:21:35.000000000 -0500
@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type:	kmap_atomic type for the destination SG
+ * @s_km_type:	kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
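For context, here is a hedged usage sketch of the blk_rq_map_kern_sg()/blk_rq_unmap_kern_sg() pair the deleted patch above exported. The surrounding request setup (submit_sg_pc_request(), the CDB handling, the synchronous blk_execute_rq() call) is illustrative, not taken from SCST; only the two mapping calls and their documented error-path contract come from the patch.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Sketch: issue a BLOCK_PC request backed by a kernel scatterlist. */
static int submit_sg_pc_request(struct request_queue *q,
				struct scatterlist *sgl, int nents,
				unsigned char *cdb, int cdb_len)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res != 0) {
		blk_put_request(rq);
		return res;
	}

	res = blk_execute_rq(q, NULL, rq, 0);
	if (res != 0)
		blk_rq_unmap_kern_sg(rq, res);	/* per the doc: error path only */

	blk_put_request(rq);
	return res;
}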
@@ -1 +0,0 @@
-../scst_exec_req_fifo-3.10.patch
@@ -1,524 +0,0 @@
diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/block/blk-map.c ./block/blk-map.c
--- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/block/blk-map.c	2014-07-16 20:25:31.000000000 +0200
+++ ./block/blk-map.c	2014-08-07 09:09:11.751302961 +0200
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>	/* for struct sg_iovec */
 
 #include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			int nents, struct blk_kern_sg_work **pbw,
+			gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/blkdev.h ./include/linux/blkdev.h
--- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/blkdev.h	2014-07-16 20:25:31.000000000 +0200
+++ ./include/linux/blkdev.h	2014-08-07 09:09:11.751302961 +0200
@@ -719,6 +719,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -838,6 +840,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, struct sg_iovec *, int,
		unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
		struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/scatterlist.h ./include/linux/scatterlist.h
--- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/include/linux/scatterlist.h	2014-07-16 20:25:31.000000000 +0200
+++ ./include/linux/scatterlist.h	2014-08-07 09:09:11.751302961 +0200
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>
 
 struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -244,6 +245,9 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -rup ../../centos-7-orig/linux-3.10.0-123.6.3.el7/lib/scatterlist.c ./lib/scatterlist.c
--- ../../centos-7-orig/linux-3.10.0-123.6.3.el7/lib/scatterlist.c	2014-07-16 20:25:31.000000000 +0200
+++ ./lib/scatterlist.c	2014-08-07 09:09:11.751302961 +0200
@@ -628,3 +628,126 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
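A brief, hedged example of the sg_copy() helper in its 3.10 form above (no km_type arguments). Only the sg_copy() call and its "0 means copy everything" convention come from the patch; copy_whole_sg() is a hypothetical wrapper.

#include <linux/scatterlist.h>

/* Sketch: copy all data from one already-built sg_table to another. */
static int copy_whole_sg(struct sg_table *dst, struct sg_table *src)
{
	/* nents_to_copy == 0 and copy_len == 0 both mean "no limit" */
	int copied = sg_copy(dst->sgl, src->sgl, 0, 0);

	return copied;	/* number of bytes copied */
}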
@@ -1,529 +0,0 @@
|
||||
diff -upkr linux-2.6.30/block/blk-map.c linux-2.6.30/block/blk-map.c
|
||||
--- linux-2.6.30/block/blk-map.c 2009-06-09 23:05:27.000000000 -0400
|
||||
+++ linux-2.6.30/block/blk-map.c 2011-05-17 21:03:29.661813000 -0400
|
||||
@@ -5,6 +5,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
+#include <linux/scatterlist.h>
|
||||
#include <scsi/sg.h> /* for struct sg_iovec */
|
||||
|
||||
#include "blk.h"
|
||||
@@ -272,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_rq_unmap_user);
|
||||
|
||||
+struct blk_kern_sg_work {
|
||||
+ atomic_t bios_inflight;
|
||||
+ struct sg_table sg_table;
|
||||
+ struct scatterlist *src_sgl;
|
||||
+};
|
||||
+
|
||||
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
|
||||
+{
|
||||
+ struct sg_table *sgt = &bw->sg_table;
|
||||
+ struct scatterlist *sg;
|
||||
+ int i;
|
||||
+
|
||||
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
|
||||
+ struct page *pg = sg_page(sg);
|
||||
+ if (pg == NULL)
|
||||
+ break;
|
||||
+ __free_page(pg);
|
||||
+ }
|
||||
+
|
||||
+ sg_free_table(sgt);
|
||||
+ kfree(bw);
|
||||
+ return;
|
||||
+}
|
||||
+
|
||||
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
|
||||
+{
|
||||
+ struct blk_kern_sg_work *bw = bio->bi_private;
|
||||
+
|
||||
+ if (bw != NULL) {
|
||||
+ /* Decrement the bios in processing and, if zero, free */
|
||||
+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
|
||||
+ if (atomic_dec_and_test(&bw->bios_inflight)) {
|
||||
+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
|
||||
+ unsigned long flags;
|
||||
+
|
||||
+ local_irq_save(flags); /* to protect KMs */
|
||||
+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
|
||||
+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
|
||||
+ local_irq_restore(flags);
|
||||
+ }
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ bio_put(bio);
|
||||
+ return;
|
||||
+}
|
||||
+
|
||||
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, struct blk_kern_sg_work **pbw,
|
||||
+ gfp_t gfp, gfp_t page_gfp)
|
||||
+{
|
||||
+ int res = 0, i;
|
||||
+ struct scatterlist *sg;
|
||||
+ struct scatterlist *new_sgl;
|
||||
+ int new_sgl_nents;
|
||||
+ size_t len = 0, to_copy;
|
||||
+ struct blk_kern_sg_work *bw;
|
||||
+
|
||||
+ bw = kzalloc(sizeof(*bw), gfp);
|
||||
+ if (bw == NULL)
|
||||
+ goto out;
|
||||
+
|
||||
+ bw->src_sgl = sgl;
|
||||
+
|
||||
+ for_each_sg(sgl, sg, nents, i)
|
||||
+ len += sg->length;
|
||||
+ to_copy = len;
|
||||
+
|
||||
+ new_sgl_nents = PFN_UP(len);
|
||||
+
|
||||
+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
|
||||
+ if (res != 0)
|
||||
+ goto err_free;
|
||||
+
|
||||
+ new_sgl = bw->sg_table.sgl;
|
||||
+
|
||||
+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
|
||||
+ struct page *pg;
|
||||
+
|
||||
+ pg = alloc_page(page_gfp);
|
||||
+ if (pg == NULL)
|
||||
+ goto err_free;
|
||||
+
|
||||
+ sg_assign_page(sg, pg);
|
||||
+ sg->length = min_t(size_t, PAGE_SIZE, len);
|
||||
+
|
||||
+ len -= PAGE_SIZE;
|
||||
+ }
|
||||
+
|
||||
+ if (rq_data_dir(rq) == WRITE) {
|
||||
+ /*
|
||||
+ * We need to limit amount of copied data to to_copy, because
|
||||
+ * sgl might have the last element in sgl not marked as last in
|
||||
+ * SG chaining.
|
||||
+ */
|
||||
+ sg_copy(new_sgl, sgl, 0, to_copy,
|
||||
+ KM_USER0, KM_USER1);
|
||||
+ }
|
||||
+
|
||||
+ *pbw = bw;
|
||||
+ /*
|
||||
+ * REQ_COPY_USER name is misleading. It should be something like
|
||||
+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
|
||||
+ */
|
||||
+ rq->cmd_flags |= REQ_COPY_USER;
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+
|
||||
+err_free:
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ res = -ENOMEM;
|
||||
+ goto out;
|
||||
+}
|
||||
+
|
||||
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
|
||||
+{
|
||||
+ int res;
|
||||
+ struct request_queue *q = rq->q;
|
||||
+ int rw = rq_data_dir(rq);
|
||||
+ int max_nr_vecs, i;
|
||||
+ size_t tot_len;
|
||||
+ bool need_new_bio;
|
||||
+ struct scatterlist *sg, *prev_sg = NULL;
|
||||
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
|
||||
+ int bios;
|
||||
+
|
||||
+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
|
||||
+ WARN_ON(1);
|
||||
+ res = -EINVAL;
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * Let's keep each bio allocation inside a single page to decrease
|
||||
+ * probability of failure.
|
||||
+ */
|
||||
+ max_nr_vecs = min_t(size_t,
|
||||
+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
|
||||
+ BIO_MAX_PAGES);
|
||||
+
|
||||
+ need_new_bio = true;
|
||||
+ tot_len = 0;
|
||||
+ bios = 0;
|
||||
+ for_each_sg(sgl, sg, nents, i) {
|
||||
+ struct page *page = sg_page(sg);
|
||||
+ void *page_addr = page_address(page);
|
||||
+ size_t len = sg->length, l;
|
||||
+ size_t offset = sg->offset;
|
||||
+
|
||||
+ tot_len += len;
|
||||
+ prev_sg = sg;
|
||||
+
|
||||
+ /*
|
||||
+ * Each segment must be aligned on DMA boundary and
|
||||
+ * not on stack. The last one may have unaligned
|
||||
+ * length as long as the total length is aligned to
|
||||
+ * DMA padding alignment.
|
||||
+ */
|
||||
+ if (i == nents - 1)
|
||||
+ l = 0;
|
||||
+ else
|
||||
+ l = len;
|
||||
+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
|
||||
+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ while (len > 0) {
|
||||
+ size_t bytes;
|
||||
+ int rc;
|
||||
+
|
||||
+ if (need_new_bio) {
|
||||
+ bio = bio_kmalloc(gfp, max_nr_vecs);
|
||||
+ if (bio == NULL) {
|
||||
+ res = -ENOMEM;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ if (rw == WRITE)
|
||||
+ bio->bi_rw |= 1 << BIO_RW;
|
||||
+
|
||||
+ bios++;
|
||||
+ bio->bi_private = bw;
|
||||
+ bio->bi_end_io = blk_bio_map_kern_endio;
|
||||
+
|
||||
+ if (hbio == NULL)
|
||||
+ hbio = tbio = bio;
|
||||
+ else
|
||||
+ tbio = tbio->bi_next = bio;
|
||||
+ }
|
||||
+
|
||||
+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
|
||||
+
|
||||
+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
|
||||
+ if (rc < bytes) {
|
||||
+ if (unlikely(need_new_bio || (rc < 0))) {
|
||||
+ if (rc < 0)
|
||||
+ res = rc;
|
||||
+ else
|
||||
+ res = -EIO;
|
||||
+ goto out_free_bios;
|
||||
+ } else {
|
||||
+ need_new_bio = true;
|
||||
+ len -= rc;
|
||||
+ offset += rc;
|
||||
+ continue;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ need_new_bio = false;
|
||||
+ offset = 0;
|
||||
+ len -= bytes;
|
||||
+ page = nth_page(page, 1);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (hbio == NULL) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ /* Total length must be aligned on DMA padding alignment */
|
||||
+ if ((tot_len & q->dma_pad_mask) &&
|
||||
+ !(rq->cmd_flags & REQ_COPY_USER)) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ if (bw != NULL)
|
||||
+ atomic_set(&bw->bios_inflight, bios);
|
||||
+
|
||||
+ while (hbio != NULL) {
|
||||
+ bio = hbio;
|
||||
+ hbio = hbio->bi_next;
|
||||
+ bio->bi_next = NULL;
|
||||
+
|
||||
+ blk_queue_bounce(q, &bio);
|
||||
+
|
||||
+ res = blk_rq_append_bio(q, rq, bio);
|
||||
+ if (unlikely(res != 0)) {
|
||||
+ bio->bi_next = hbio;
|
||||
+ hbio = bio;
|
||||
+ /* We can have one or more bios bounced */
|
||||
+ goto out_unmap_bios;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ rq->buffer = rq->data = NULL;
|
||||
+out:
|
||||
+ return res;
|
||||
+
|
||||
+out_unmap_bios:
|
||||
+ blk_rq_unmap_kern_sg(rq, res);
|
||||
+
|
||||
+out_free_bios:
|
||||
+ while (hbio != NULL) {
|
||||
+ bio = hbio;
|
||||
+ hbio = hbio->bi_next;
|
||||
+ bio_put(bio);
|
||||
+ }
|
||||
+ goto out;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
|
||||
+ * @rq: request to fill
|
||||
+ * @sgl: area to map
|
||||
+ * @nents: number of elements in @sgl
|
||||
+ * @gfp: memory allocation flags
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data will be mapped directly if possible. Otherwise a bounce
|
||||
+ * buffer will be used.
|
||||
+ */
|
||||
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, gfp_t gfp)
|
||||
+{
|
||||
+ int res;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
|
||||
+ if (unlikely(res != 0)) {
|
||||
+ struct blk_kern_sg_work *bw = NULL;
|
||||
+
|
||||
+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
|
||||
+ gfp, rq->q->bounce_gfp | gfp);
|
||||
+ if (unlikely(res != 0))
|
||||
+ goto out;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
|
||||
+ bw->sg_table.nents, bw, gfp);
|
||||
+ if (res != 0) {
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ rq->buffer = rq->data = NULL;
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
|
||||
+ * @rq: request to unmap
|
||||
+ * @err: non-zero error code
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
|
||||
+ * only in case of an error!
|
||||
+ */
|
||||
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
|
||||
+{
|
||||
+ struct bio *bio = rq->bio;
|
||||
+
|
||||
+ while (bio) {
|
||||
+ struct bio *b = bio;
|
||||
+ bio = bio->bi_next;
|
||||
+ b->bi_end_io(b, err);
|
||||
+ }
|
||||
+ rq->bio = 0;
|
||||
+
|
||||
+ return;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
|
||||
+
|
||||
/**
|
||||
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
diff -upkr linux-2.6.30/include/linux/blkdev.h linux-2.6.30/include/linux/blkdev.h
|
||||
--- linux-2.6.30/include/linux/blkdev.h 2009-06-09 23:05:27.000000000 -0400
|
||||
+++ linux-2.6.30/include/linux/blkdev.h 2009-08-12 11:48:06.000000000 -0400
|
||||
@@ -704,6 +704,8 @@ extern unsigned long blk_max_low_pfn, bl
|
||||
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
|
||||
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
|
||||
|
||||
+#define SCSI_EXEC_REQ_FIFO_DEFINED
|
||||
+
|
||||
#ifdef CONFIG_BOUNCE
|
||||
extern int init_emergency_isa_pool(void);
|
||||
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
|
||||
@@ -807,6 +809,9 @@ extern int blk_rq_map_kern(struct reques
|
||||
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
|
||||
struct rq_map_data *, struct sg_iovec *, int,
|
||||
unsigned int, gfp_t);
|
||||
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, gfp_t gfp);
|
||||
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
|
||||
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
|
||||
struct request *, int);
|
||||
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
|
||||
diff -upkr linux-2.6.30/include/linux/scatterlist.h linux-2.6.30/include/linux/scatterlist.h
|
||||
--- linux-2.6.30/include/linux/scatterlist.h 2009-06-09 23:05:27.000000000 -0400
|
||||
+++ linux-2.6.30/include/linux/scatterlist.h 2009-08-12 11:50:02.000000000 -0400
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.30/lib/scatterlist.c linux-2.6.30/lib/scatterlist.c
--- linux-2.6.30/lib/scatterlist.c	2009-06-09 23:05:27.000000000 -0400
+++ linux-2.6.30/lib/scatterlist.c	2009-08-12 11:56:04.000000000 -0400
@@ -485,3 +485,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
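
The sg_copy() helper added above walks both vectors with kmap_atomic() and copies page by page, advancing to the next destination element as each one fills. A minimal sketch of a caller, assuming two pre-allocated pages on each side (the function and variable names here are illustrative, not part of the patch):

    /* Hypothetical sg_copy() caller: duplicate a two-page SG vector. */
    #include <linux/scatterlist.h>

    static int example_dup_sg(struct page *src_pg[2], struct page *dst_pg[2])
    {
    	struct scatterlist src[2], dst[2];

    	sg_init_table(src, 2);
    	sg_set_page(&src[0], src_pg[0], PAGE_SIZE, 0);
    	sg_set_page(&src[1], src_pg[1], PAGE_SIZE, 0);

    	sg_init_table(dst, 2);
    	sg_set_page(&dst[0], dst_pg[0], PAGE_SIZE, 0);
    	sg_set_page(&dst[1], dst_pg[1], PAGE_SIZE, 0);

    	/* 0 for nents_to_copy and copy_len means "no limit". */
    	return sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);
    }

The return value is the number of bytes copied; the copy stops early once either chain runs out of entries.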
@@ -1,529 +0,0 @@
diff -upkr linux-2.6.31/block/blk-map.c linux-2.6.31/block/blk-map.c
--- linux-2.6.31/block/blk-map.c	2009-09-09 18:13:59.000000000 -0400
+++ linux-2.6.31/block/blk-map.c	2011-05-17 21:05:32.669812993 -0400
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= 1 << BIO_RW;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
diff -upkr linux-2.6.31/include/linux/blkdev.h linux-2.6.31/include/linux/blkdev.h
--- linux-2.6.31/include/linux/blkdev.h	2009-09-09 18:13:59.000000000 -0400
+++ linux-2.6.31/include/linux/blkdev.h	2009-09-23 06:17:33.000000000 -0400
@@ -699,6 +699,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -803,6 +805,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.31/include/linux/scatterlist.h linux-2.6.31/include/linux/scatterlist.h
--- linux-2.6.31/include/linux/scatterlist.h	2009-09-09 18:13:59.000000000 -0400
+++ linux-2.6.31/include/linux/scatterlist.h	2009-09-23 06:17:33.000000000 -0400
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.31/lib/scatterlist.c linux-2.6.31/lib/scatterlist.c
--- linux-2.6.31/lib/scatterlist.c	2009-09-09 18:13:59.000000000 -0400
+++ linux-2.6.31/lib/scatterlist.c	2009-09-23 06:17:33.000000000 -0400
@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
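
Taken together, the two exported entry points give kernel code a way to attach a scatterlist to a BLOCK_PC request and to tear the mapping down again if setup fails. A sketch of the intended calling sequence, assuming an already-built scatterlist (the submit helper itself is invented for illustration):

    /* Hypothetical caller pairing the two exports around blk_execute_rq(). */
    static int example_exec_sg(struct request_queue *q,
    			   struct scatterlist *sgl, int nents)
    {
    	struct request *rq;
    	int res;

    	rq = blk_get_request(q, READ, GFP_KERNEL);
    	if (rq == NULL)
    		return -ENOMEM;
    	rq->cmd_type = REQ_TYPE_BLOCK_PC;

    	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
    	if (res != 0) {
    		blk_put_request(rq);
    		return res;
    	}

    	/*
    	 * If a later setup step failed here, before the request was
    	 * started, blk_rq_unmap_kern_sg(rq, res) would release the
    	 * mapping; once the request runs, completion goes through the
    	 * bios' own end_io handlers instead.
    	 */
    	res = blk_execute_rq(q, NULL, rq, 0);

    	blk_put_request(rq);
    	return res;
    }

Note the asymmetry documented above: blk_rq_unmap_kern_sg() is only for the error path, since a successful submission frees the bounce state through blk_bio_map_kern_endio().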
@@ -1,529 +0,0 @@
diff -upkr linux-2.6.32/block/blk-map.c linux-2.6.32/block/blk-map.c
--- linux-2.6.32/block/blk-map.c	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/block/blk-map.c	2011-05-17 20:56:18.341812997 -0400
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= 1 << BIO_RW;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
diff -upkr linux-2.6.32/include/linux/blkdev.h linux-2.6.32/include/linux/blkdev.h
--- linux-2.6.32/include/linux/blkdev.h	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/include/linux/blkdev.h	2009-12-16 07:21:35.000000000 -0500
@@ -708,6 +708,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -812,6 +814,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.32/include/linux/scatterlist.h linux-2.6.32/include/linux/scatterlist.h
--- linux-2.6.32/include/linux/scatterlist.h	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/include/linux/scatterlist.h	2009-12-16 07:21:35.000000000 -0500
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.32/lib/scatterlist.c linux-2.6.32/lib/scatterlist.c
--- linux-2.6.32/lib/scatterlist.c	2009-12-02 22:51:21.000000000 -0500
+++ linux-2.6.32/lib/scatterlist.c	2009-12-16 07:21:35.000000000 -0500
@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
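
Each of these patches also defines SCSI_EXEC_REQ_FIFO_DEFINED in blkdev.h, which lets out-of-tree code detect at compile time whether it is building against a patched kernel. A minimal, assumed consumer (the fallback macro name is invented for illustration):

    #include <linux/blkdev.h>

    #ifdef SCSI_EXEC_REQ_FIFO_DEFINED
    /* Patched kernel: blk_rq_map_kern_sg()/blk_rq_unmap_kern_sg() exist. */
    #define HAVE_KERN_SG_MAPPING 1
    #else
    /* Unpatched kernel: fall back to a slower data-copying path. */
    #define HAVE_KERN_SG_MAPPING 0
    #endif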
@@ -1,529 +0,0 @@
diff -upkr linux-2.6.33/block/blk-map.c linux-2.6.33/block/blk-map.c
--- linux-2.6.33/block/blk-map.c	2010-02-24 13:52:17.000000000 -0500
+++ linux-2.6.33/block/blk-map.c	2011-05-17 21:09:00.317812998 -0400
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +272,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= 1 << BIO_RW;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
diff -upkr linux-2.6.33/include/linux/blkdev.h linux-2.6.33/include/linux/blkdev.h
--- linux-2.6.33/include/linux/blkdev.h	2010-02-24 13:52:17.000000000 -0500
+++ linux-2.6.33/include/linux/blkdev.h	2010-03-01 07:41:59.000000000 -0500
@@ -710,6 +710,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -825,6 +827,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.33/include/linux/scatterlist.h linux-2.6.33/include/linux/scatterlist.h
--- linux-2.6.33/include/linux/scatterlist.h	2010-02-24 13:52:17.000000000 -0500
+++ linux-2.6.33/include/linux/scatterlist.h	2010-03-01 07:41:59.000000000 -0500
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.33/lib/scatterlist.c linux-2.6.33/lib/scatterlist.c
--- linux-2.6.33/lib/scatterlist.c	2010-02-24 13:52:17.000000000 -0500
+++ linux-2.6.33/lib/scatterlist.c	2010-03-01 07:41:59.000000000 -0500
@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
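
One detail worth calling out in blk_bio_map_kern_endio() above is the bios_inflight counter: every bio built for the request holds one reference on the shared bounce state, and only the completion that drops the count to zero copies read data back and frees it. Distilled to its core, with names invented for illustration, the pattern is:

    /* Completion counting: the last finisher tears down shared state. */
    #include <linux/slab.h>
    #include <asm/atomic.h>

    struct shared_state {
    	atomic_t inflight;	/* one reference per outstanding piece */
    };

    static void piece_done(struct shared_state *s)
    {
    	/*
    	 * atomic_dec_and_test() returns true for exactly one caller,
    	 * so the teardown cannot run twice or run too early.
    	 */
    	if (atomic_dec_and_test(&s->inflight))
    		kfree(s);
    }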
@@ -1,530 +0,0 @@
diff -upkr linux-2.6.34/block/blk-map.c linux-2.6.34/block/blk-map.c
--- linux-2.6.34/block/blk-map.c	2010-05-16 17:17:36.000000000 -0400
+++ linux-2.6.34/block/blk-map.c	2011-05-17 21:10:43.745812995 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+	int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= 1 << BIO_RW;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
|
||||
+ gfp, rq->q->bounce_gfp | gfp);
|
||||
+ if (unlikely(res != 0))
|
||||
+ goto out;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
|
||||
+ bw->sg_table.nents, bw, gfp);
|
||||
+ if (res != 0) {
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ rq->buffer = NULL;
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
|
||||
+ * @rq: request to unmap
|
||||
+ * @err: non-zero error code
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
|
||||
+ * only in case of an error!
|
||||
+ */
|
||||
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
|
||||
+{
|
||||
+ struct bio *bio = rq->bio;
|
||||
+
|
||||
+ while (bio) {
|
||||
+ struct bio *b = bio;
|
||||
+ bio = bio->bi_next;
|
||||
+ b->bi_end_io(b, err);
|
||||
+ }
|
||||
+ rq->bio = NULL;
|
||||
+
|
||||
+ return;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
|
||||
+
|
||||
/**
|
||||
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
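
For orientation between the per-file hunks, here is a minimal caller sketch (not part of the patch; the function name and the forced failure flag are hypothetical) showing how the two exports above pair up. Per the kernel-doc, blk_rq_unmap_kern_sg() is strictly an error-path helper for a request that was mapped but will not be executed:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical illustration only, not part of the patch. */
static int example_map_sg(struct request *rq, struct scatterlist *sgl,
			  int nents, bool submit_failed)
{
	int res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);

	if (res != 0)
		return res;		/* nothing was attached to rq */

	if (submit_failed) {		/* assumed failure before execution */
		res = -EIO;
		blk_rq_unmap_kern_sg(rq, res);
	}
	return res;
}
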
diff -upkr linux-2.6.34/include/linux/blkdev.h linux-2.6.34/include/linux/blkdev.h
--- linux-2.6.34/include/linux/blkdev.h	2010-05-16 17:17:36.000000000 -0400
+++ linux-2.6.34/include/linux/blkdev.h	2010-05-24 06:51:22.000000000 -0400
@@ -713,6 +713,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -828,6 +830,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.34/include/linux/scatterlist.h linux-2.6.34/include/linux/scatterlist.h
--- linux-2.6.34/include/linux/scatterlist.h	2010-05-16 17:17:36.000000000 -0400
+++ linux-2.6.34/include/linux/scatterlist.h	2010-05-24 06:51:22.000000000 -0400
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.34/lib/scatterlist.c linux-2.6.34/lib/scatterlist.c
--- linux-2.6.34/lib/scatterlist.c	2010-05-16 17:17:36.000000000 -0400
+++ linux-2.6.34/lib/scatterlist.c	2010-05-24 06:51:22.000000000 -0400
@@ -494,3 +494,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type:	kmap_atomic type for the destination SG
+ * @s_km_type:	kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
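
The 2.6.34 variant of the patch ends here; before the near-identical 2.6.35 copy below, a short hedged sketch of how the sg_copy() helper just added is invoked (the function and buffer names are illustrative; zero limits mean "copy everything", exactly as the WRITE bounce path above uses it):

#include <linux/scatterlist.h>

/* Illustrative only: mirror src into dst until either vector ends.
 * Passing 0 for nents_to_copy and copy_len means "no limit"; the
 * return value is the number of bytes actually copied. */
static int example_mirror_sgl(struct scatterlist *dst,
			      struct scatterlist *src)
{
	return sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);
}
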
@@ -1,530 +0,0 @@
diff -upkr linux-2.6.35/block/blk-map.c linux-2.6.35/block/blk-map.c
--- linux-2.6.35/block/blk-map.c	2010-08-01 18:11:14.000000000 -0400
+++ linux-2.6.35/block/blk-map.c	2011-05-17 21:12:23.125813000 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= 1 << BIO_RW;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
diff -upkr linux-2.6.35/include/linux/blkdev.h linux-2.6.35/include/linux/blkdev.h
--- linux-2.6.35/include/linux/blkdev.h	2010-08-01 18:11:14.000000000 -0400
+++ linux-2.6.35/include/linux/blkdev.h	2010-08-04 04:21:59.737128732 -0400
@@ -717,6 +717,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -832,6 +834,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.35/include/linux/scatterlist.h linux-2.6.35/include/linux/scatterlist.h
--- linux-2.6.35/include/linux/scatterlist.h	2010-08-01 18:11:14.000000000 -0400
+++ linux-2.6.35/include/linux/scatterlist.h	2010-08-04 04:21:59.741129485 -0400
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.35/lib/scatterlist.c linux-2.6.35/lib/scatterlist.c
--- linux-2.6.35/lib/scatterlist.c	2010-08-01 18:11:14.000000000 -0400
+++ linux-2.6.35/lib/scatterlist.c	2010-08-04 04:21:59.741129485 -0400
@@ -494,3 +494,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type:	kmap_atomic type for the destination SG
+ * @s_km_type:	kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
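
Each per-kernel copy repeats the same completion accounting: every bio built for the request holds one count on bw->bios_inflight, and only the last completion copies bounced READ data back and frees the work struct. A stripped-down model of that pattern (the types and names below are simplified illustrations, not the patch's own):

#include <linux/slab.h>

struct example_work {
	atomic_t inflight;	/* one count per in-flight bio */
};

static void example_bio_done(struct example_work *w, bool is_read, int err)
{
	if (atomic_dec_and_test(&w->inflight)) {
		if (is_read && err == 0) {
			/* last completion: copy bounce pages back here */
		}
		kfree(w);	/* final reference dropped */
	}
}
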
@@ -1,532 +0,0 @@
diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
--- linux-2.6.36/block/blk-map.c	2010-10-20 16:30:22.000000000 -0400
+++ linux-2.6.36/block/blk-map.c	2011-05-17 21:13:42.301812997 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -271,6 +273,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
diff -upkr linux-2.6.36/include/linux/blkdev.h linux-2.6.36/include/linux/blkdev.h
--- linux-2.6.36/include/linux/blkdev.h	2010-10-20 16:30:22.000000000 -0400
+++ linux-2.6.36/include/linux/blkdev.h	2010-10-26 04:00:15.899759399 -0400
@@ -629,6 +629,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.36/include/linux/scatterlist.h linux-2.6.36/include/linux/scatterlist.h
--- linux-2.6.36/include/linux/scatterlist.h	2010-10-20 16:30:22.000000000 -0400
+++ linux-2.6.36/include/linux/scatterlist.h	2010-10-26 04:00:15.899759399 -0400
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.36/lib/scatterlist.c linux-2.6.36/lib/scatterlist.c
--- linux-2.6.36/lib/scatterlist.c	2010-10-20 16:30:22.000000000 -0400
+++ linux-2.6.36/lib/scatterlist.c	2010-10-26 04:00:15.899759399 -0400
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type:	kmap_atomic type for the destination SG
+ * @s_km_type:	kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
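
One sizing detail worth calling out before the final (2.6.37) copy: blk_rq_copy_kern_sg() allocates PFN_UP(len) bounce pages and clamps each entry's length before subtracting PAGE_SIZE, which is what keeps the trailing short page correct. A worked example, assuming PAGE_SIZE == 4096 (the figures below are illustrative, not from the patch):

/* A 10000-byte source SG list needs PFN_UP(10000) == 3 bounce pages.
 * The allocation loop sets their lengths to:
 *   entry 0: min_t(size_t, 4096, 10000) == 4096   (len becomes 5904)
 *   entry 1: min_t(size_t, 4096, 5904)  == 4096   (len becomes 1808)
 *   entry 2: min_t(size_t, 4096, 1808)  == 1808
 * The clamp runs before "len -= PAGE_SIZE", so although the unsigned
 * len wraps on the last iteration, it is never read again. */
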
@@ -1,532 +0,0 @@
diff -upkr linux-2.6.37/block/blk-map.c linux-2.6.37/block/blk-map.c
--- linux-2.6.37/block/blk-map.c	2011-01-04 19:50:19.000000000 -0500
+++ linux-2.6.37/block/blk-map.c	2011-05-17 21:15:14.329812999 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
diff -upkr linux-2.6.37/include/linux/blkdev.h linux-2.6.37/include/linux/blkdev.h
--- linux-2.6.37/include/linux/blkdev.h	2011-01-04 19:50:19.000000000 -0500
+++ linux-2.6.37/include/linux/blkdev.h	2011-01-08 08:45:54.350430208 -0500
@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.37/include/linux/scatterlist.h linux-2.6.37/include/linux/scatterlist.h
--- linux-2.6.37/include/linux/scatterlist.h	2011-01-04 19:50:19.000000000 -0500
+++ linux-2.6.37/include/linux/scatterlist.h	2011-01-08 08:45:54.354431761 -0500
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.37/lib/scatterlist.c linux-2.6.37/lib/scatterlist.c
--- linux-2.6.37/lib/scatterlist.c	2011-01-04 19:50:19.000000000 -0500
+++ linux-2.6.37/lib/scatterlist.c	2011-01-08 08:45:54.401930472 -0500
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type:	kmap_atomic type for the destination SG
|
||||
+ * @s_km_type: kmap_atomic type for the source SG
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data from the source SG vector will be copied to the destination SG
|
||||
+ * vector. End of the vectors will be determined by sg_next() returning
|
||||
+ * NULL. Returns number of bytes copied.
|
||||
+ */
|
||||
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
|
||||
+ int nents_to_copy, size_t copy_len,
|
||||
+ enum km_type d_km_type, enum km_type s_km_type)
|
||||
+{
|
||||
+ int res = 0;
|
||||
+ size_t dst_len, dst_offs;
|
||||
+
|
||||
+ if (copy_len == 0)
|
||||
+ copy_len = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ if (nents_to_copy == 0)
|
||||
+ nents_to_copy = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+
|
||||
+ do {
|
||||
+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
|
||||
+ src_sg, copy_len, d_km_type, s_km_type);
|
||||
+ copy_len -= copied;
|
||||
+ res += copied;
|
||||
+ if ((copy_len == 0) || (dst_sg == NULL))
|
||||
+ goto out;
|
||||
+
|
||||
+ nents_to_copy--;
|
||||
+ if (nents_to_copy == 0)
|
||||
+ goto out;
|
||||
+
|
||||
+ src_sg = sg_next(src_sg);
|
||||
+ } while (src_sg != NULL);
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(sg_copy);
|
||||
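The two exports above make kernel-space SG mapping available to pass-through backends. For illustration only, a minimal sketch of how a caller might drive them on a 2.6.37-era block layer (the example_* name is hypothetical and error handling is trimmed):

/*
 * Hypothetical caller -- not part of the patch. Assumes process context
 * and a CDB no longer than BLK_MAX_CDB.
 */
static int example_issue_pc_request(struct request_queue *q,
		unsigned char *cdb, unsigned int cdb_len,
		struct scatterlist *sgl, int nents, int write)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	/* Maps directly when aligned; bounces through copied pages if not. */
	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res == 0)
		res = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return res;
}

Per the kernel-doc above, blk_rq_unmap_kern_sg() would be used instead of blk_execute_rq() only when a successfully mapped request has to be abandoned before it is started.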
@@ -1,532 +0,0 @@
diff -upkr linux-2.6.38/block/blk-map.c linux-2.6.38/block/blk-map.c
--- linux-2.6.38/block/blk-map.c 2011-03-14 21:20:32.000000000 -0400
+++ linux-2.6.38/block/blk-map.c 2011-05-11 22:07:37.589813000 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h> /* for struct sg_iovec */

 #include "blk.h"
@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work **pbw,
+		gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+ * buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ * only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
diff -upkr linux-2.6.38/include/linux/blkdev.h linux-2.6.38/include/linux/blkdev.h
--- linux-2.6.38/include/linux/blkdev.h 2011-03-14 21:20:32.000000000 -0400
+++ linux-2.6.38/include/linux/blkdev.h 2011-03-18 10:19:00.000000000 -0400
@@ -593,6 +593,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
 #define BLK_MIN_SG_TIMEOUT (7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, struct sg_iovec *, int,
		unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
		struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.38/include/linux/scatterlist.h linux-2.6.38/include/linux/scatterlist.h
--- linux-2.6.38/include/linux/scatterlist.h 2011-03-14 21:20:32.000000000 -0400
+++ linux-2.6.38/include/linux/scatterlist.h 2011-03-18 10:19:00.000000000 -0400
@@ -3,6 +3,7 @@

 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+		int nents_to_copy, size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.38/lib/scatterlist.c linux-2.6.38/lib/scatterlist.c
--- linux-2.6.38/lib/scatterlist.c 2011-03-14 21:20:32.000000000 -0400
+++ linux-2.6.38/lib/scatterlist.c 2011-03-18 10:46:41.000000000 -0400
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+		size_t *pdst_offs, struct scatterlist *src_sg,
+		size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ * Data from the source SG vector will be copied to the destination SG
+ * vector. End of the vectors will be determined by sg_next() returning
+ * NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+		int nents_to_copy, size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
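Since sg_copy() treats both copy_len == 0 and nents_to_copy == 0 as "no limit", a caller can clone one SG vector into another of a different shape. A self-contained sketch, process context assumed (the example_* name is hypothetical; the buffers are kmalloc'ed, as required for DMA-capable pages):

/* Hypothetical usage sketch -- not part of the patch. */
static int example_sg_copy_usage(void)
{
	struct scatterlist src[2], dst[1];
	void *a = kmalloc(512, GFP_KERNEL);
	void *b = kmalloc(512, GFP_KERNEL);
	void *out = kmalloc(1024, GFP_KERNEL);
	int copied = -ENOMEM;

	if ((a == NULL) || (b == NULL) || (out == NULL))
		goto out_free;

	sg_init_table(src, 2);
	sg_set_buf(&src[0], a, 512);
	sg_set_buf(&src[1], b, 512);

	sg_init_table(dst, 1);
	sg_set_buf(&dst[0], out, 1024);

	/* 0/0 == no limits: stops when either vector ends; returns bytes. */
	copied = sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);

out_free:
	kfree(out);
	kfree(b);
	kfree(a);
	return copied; /* 1024 expected on success */
}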
@@ -1,532 +0,0 @@
diff -upkr linux-2.6.39/block/blk-map.c linux-2.6.39/block/blk-map.c
--- linux-2.6.39/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39/block/blk-map.c 2011-05-19 10:49:02.753812997 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h> /* for struct sg_iovec */

 #include "blk.h"
@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work **pbw,
+		gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+ * buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ * only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
diff -upkr linux-2.6.39/include/linux/blkdev.h linux-2.6.39/include/linux/blkdev.h
--- linux-2.6.39/include/linux/blkdev.h 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39/include/linux/blkdev.h 2011-05-19 10:49:02.753812997 -0400
@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
 #define BLK_MIN_SG_TIMEOUT (7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -707,6 +709,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, struct sg_iovec *, int,
		unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
		struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-2.6.39/include/linux/scatterlist.h linux-2.6.39/include/linux/scatterlist.h
--- linux-2.6.39/include/linux/scatterlist.h 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39/include/linux/scatterlist.h 2011-05-19 10:49:02.753812997 -0400
@@ -3,6 +3,7 @@

 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+		int nents_to_copy, size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-2.6.39/lib/scatterlist.c linux-2.6.39/lib/scatterlist.c
--- linux-2.6.39/lib/scatterlist.c 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39/lib/scatterlist.c 2011-05-19 10:49:02.753812997 -0400
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+		size_t *pdst_offs, struct scatterlist *src_sg,
+		size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ * Data from the source SG vector will be copied to the destination SG
+ * vector. End of the vectors will be determined by sg_next() returning
+ * NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+		int nents_to_copy, size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
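The -EINVAL branch in __blk_rq_map_kern_sg() above is what routes a vector onto the copying path in blk_rq_map_kern_sg(). The eligibility rule, restated as a standalone predicate (a sketch with a hypothetical example_* name; the on-stack page check from the original is omitted):

/* Hypothetical restatement -- not part of the patch. */
static bool example_sg_directly_mappable(struct request_queue *q,
		struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* Only non-final segments need a DMA-aligned length. */
		size_t l = (i == nents - 1) ? 0 : sg->length;

		if ((sg->offset | l) & queue_dma_alignment(q))
			return false;
	}
	return true;
}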
@@ -1,532 +0,0 @@
diff -upkr linux-3.0.0-orig/block/blk-map.c linux-3.0.0-scst-dbg/block/blk-map.c
--- linux-3.0.0-orig/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
+++ linux-3.0.0-scst-dbg/block/blk-map.c 2011-07-22 19:40:27.131230804 -0400
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h> /* for struct sg_iovec */

 #include "blk.h"
@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work **pbw,
+		gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+ * buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ * only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
diff -upkr linux-3.0.0-orig/include/linux/blkdev.h linux-3.0.0-scst-dbg/include/linux/blkdev.h
--- linux-3.0.0-orig/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
+++ linux-3.0.0-scst-dbg/include/linux/blkdev.h 2011-07-22 19:24:27.803231156 -0400
@@ -594,6 +594,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
 #define BLK_MIN_SG_TIMEOUT (7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, struct sg_iovec *, int,
		unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
		struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff -upkr linux-3.0.0-orig/include/linux/scatterlist.h linux-3.0.0-scst-dbg/include/linux/scatterlist.h
--- linux-3.0.0-orig/include/linux/scatterlist.h 2011-07-21 22:17:23.000000000 -0400
+++ linux-3.0.0-scst-dbg/include/linux/scatterlist.h 2011-07-22 19:24:27.803231156 -0400
@@ -3,6 +3,7 @@

 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+		int nents_to_copy, size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff -upkr linux-3.0.0-orig/lib/scatterlist.c linux-3.0.0-scst-dbg/lib/scatterlist.c
--- linux-3.0.0-orig/lib/scatterlist.c 2011-07-21 22:17:23.000000000 -0400
+++ linux-3.0.0-scst-dbg/lib/scatterlist.c 2011-07-22 19:40:27.131230804 -0400
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+		size_t *pdst_offs, struct scatterlist *src_sg,
+		size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ * Data from the source SG vector will be copied to the destination SG
+ * vector. End of the vectors will be determined by sg_next() returning
+ * NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+		int nents_to_copy, size_t copy_len,
+		enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
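blk_rq_copy_kern_sg() above sizes its bounce table as PFN_UP(len) whole pages and trims only the final entry, so a 9216-byte payload on a 4 KiB-page machine becomes three entries of 4096, 4096 and 1024 bytes. A sketch of that arithmetic (hypothetical helper name, len > 0 assumed):

/* Hypothetical illustration -- not part of the patch. */
static void example_bounce_sizing(size_t len)
{
	int nents = PFN_UP(len);	/* 9216 -> 3 on 4 KiB pages */
	size_t last = len - (size_t)(nents - 1) * PAGE_SIZE;	/* 1024 */

	pr_debug("bounce table: %d entries, last %zu bytes\n", nents, last);
}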
@@ -1,536 +0,0 @@
=== modified file 'linux-3.1-scst/block/blk-map.c'
--- linux-3.1-orig/block/blk-map.c 2011-10-26 20:34:50 +0000
+++ linux-3.1-scst/block/blk-map.c 2011-10-26 20:58:56 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h> /* for struct sg_iovec */

 #include "blk.h"
@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work **pbw,
+		gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
|
||||
+ * length as long as the total length is aligned to
|
||||
+ * DMA padding alignment.
|
||||
+ */
|
||||
+ if (i == nents - 1)
|
||||
+ l = 0;
|
||||
+ else
|
||||
+ l = len;
|
||||
+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
|
||||
+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ while (len > 0) {
|
||||
+ size_t bytes;
|
||||
+ int rc;
|
||||
+
|
||||
+ if (need_new_bio) {
|
||||
+ bio = bio_kmalloc(gfp, max_nr_vecs);
|
||||
+ if (bio == NULL) {
|
||||
+ res = -ENOMEM;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ if (rw == WRITE)
|
||||
+ bio->bi_rw |= REQ_WRITE;
|
||||
+
|
||||
+ bios++;
|
||||
+ bio->bi_private = bw;
|
||||
+ bio->bi_end_io = blk_bio_map_kern_endio;
|
||||
+
|
||||
+ if (hbio == NULL)
|
||||
+ hbio = tbio = bio;
|
||||
+ else
|
||||
+ tbio = tbio->bi_next = bio;
|
||||
+ }
|
||||
+
|
||||
+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
|
||||
+
|
||||
+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
|
||||
+ if (rc < bytes) {
|
||||
+ if (unlikely(need_new_bio || (rc < 0))) {
|
||||
+ if (rc < 0)
|
||||
+ res = rc;
|
||||
+ else
|
||||
+ res = -EIO;
|
||||
+ goto out_free_bios;
|
||||
+ } else {
|
||||
+ need_new_bio = true;
|
||||
+ len -= rc;
|
||||
+ offset += rc;
|
||||
+ continue;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ need_new_bio = false;
|
||||
+ offset = 0;
|
||||
+ len -= bytes;
|
||||
+ page = nth_page(page, 1);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (hbio == NULL) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ /* Total length must be aligned on DMA padding alignment */
|
||||
+ if ((tot_len & q->dma_pad_mask) &&
|
||||
+ !(rq->cmd_flags & REQ_COPY_USER)) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ if (bw != NULL)
|
||||
+ atomic_set(&bw->bios_inflight, bios);
|
||||
+
|
||||
+ while (hbio != NULL) {
|
||||
+ bio = hbio;
|
||||
+ hbio = hbio->bi_next;
|
||||
+ bio->bi_next = NULL;
|
||||
+
|
||||
+ blk_queue_bounce(q, &bio);
|
||||
+
|
||||
+ res = blk_rq_append_bio(q, rq, bio);
|
||||
+ if (unlikely(res != 0)) {
|
||||
+ bio->bi_next = hbio;
|
||||
+ hbio = bio;
|
||||
+ /* We can have one or more bios bounced */
|
||||
+ goto out_unmap_bios;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ res = 0;
|
||||
+
|
||||
+ rq->buffer = NULL;
|
||||
+out:
|
||||
+ return res;
|
||||
+
|
||||
+out_unmap_bios:
|
||||
+ blk_rq_unmap_kern_sg(rq, res);
|
||||
+
|
||||
+out_free_bios:
|
||||
+ while (hbio != NULL) {
|
||||
+ bio = hbio;
|
||||
+ hbio = hbio->bi_next;
|
||||
+ bio_put(bio);
|
||||
+ }
|
||||
+ goto out;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
|
||||
+ * @rq: request to fill
|
||||
+ * @sgl: area to map
|
||||
+ * @nents: number of elements in @sgl
|
||||
+ * @gfp: memory allocation flags
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data will be mapped directly if possible. Otherwise a bounce
|
||||
+ * buffer will be used.
|
||||
+ */
|
||||
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, gfp_t gfp)
|
||||
+{
|
||||
+ int res;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
|
||||
+ if (unlikely(res != 0)) {
|
||||
+ struct blk_kern_sg_work *bw = NULL;
|
||||
+
|
||||
+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
|
||||
+ gfp, rq->q->bounce_gfp | gfp);
|
||||
+ if (unlikely(res != 0))
|
||||
+ goto out;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
|
||||
+ bw->sg_table.nents, bw, gfp);
|
||||
+ if (res != 0) {
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ rq->buffer = NULL;
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
|
||||
+ * @rq: request to unmap
|
||||
+ * @err: non-zero error code
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
|
||||
+ * only in case of an error!
|
||||
+ */
|
||||
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
|
||||
+{
|
||||
+ struct bio *bio = rq->bio;
|
||||
+
|
||||
+ while (bio) {
|
||||
+ struct bio *b = bio;
|
||||
+ bio = bio->bi_next;
|
||||
+ b->bi_end_io(b, err);
|
||||
+ }
|
||||
+ rq->bio = NULL;
|
||||
+
|
||||
+ return;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
|
||||
+
|
||||
/**
|
||||
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
|
||||
=== modified file 'linux-3.1-scst/include/linux/blkdev.h'
|
||||
--- linux-3.1-orig/include/linux/blkdev.h 2011-10-26 20:34:50 +0000
|
||||
+++ linux-3.1-scst/include/linux/blkdev.h 2011-10-26 20:58:56 +0000
|
||||
@@ -599,6 +599,8 @@ extern unsigned long blk_max_low_pfn, bl
|
||||
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
|
||||
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
|
||||
|
||||
+#define SCSI_EXEC_REQ_FIFO_DEFINED
|
||||
+
|
||||
#ifdef CONFIG_BOUNCE
|
||||
extern int init_emergency_isa_pool(void);
|
||||
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
|
||||
@@ -714,6 +716,9 @@ extern int blk_rq_map_kern(struct reques
|
||||
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
|
||||
struct rq_map_data *, struct sg_iovec *, int,
|
||||
unsigned int, gfp_t);
|
||||
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, gfp_t gfp);
|
||||
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
|
||||
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
|
||||
struct request *, int);
|
||||
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
|
||||
|
||||
=== modified file 'linux-3.1-scst/include/linux/scatterlist.h'
|
||||
--- linux-3.1-orig/include/linux/scatterlist.h 2011-10-26 20:34:50 +0000
|
||||
+++ linux-3.1-scst/include/linux/scatterlist.h 2011-10-26 20:58:56 +0000
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <asm/scatterlist.h>
|
||||
+#include <asm/kmap_types.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/io.h>
|
||||
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
|
||||
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
|
||||
void *buf, size_t buflen);
|
||||
|
||||
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
|
||||
+ int nents_to_copy, size_t copy_len,
|
||||
+ enum km_type d_km_type, enum km_type s_km_type);
|
||||
+
|
||||
/*
|
||||
* Maximum number of entries that will be allocated in one piece, if
|
||||
* a list larger than this is required then chaining will be utilized.
|
||||
|
||||
=== modified file 'linux-3.1-scst/lib/scatterlist.c'
|
||||
--- linux-3.1-orig/lib/scatterlist.c 2011-10-26 20:34:50 +0000
|
||||
+++ linux-3.1-scst/lib/scatterlist.c 2011-10-26 20:58:56 +0000
|
||||
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
|
||||
return sg_copy_buffer(sgl, nents, buf, buflen, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(sg_copy_to_buffer);
|
||||
+
|
||||
+/*
|
||||
+ * Can switch to the next dst_sg element, so, to copy to strictly only
|
||||
+ * one dst_sg element, it must be either last in the chain, or
|
||||
+ * copy_len == dst_sg->length.
|
||||
+ */
|
||||
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
|
||||
+ size_t *pdst_offs, struct scatterlist *src_sg,
|
||||
+ size_t copy_len,
|
||||
+ enum km_type d_km_type, enum km_type s_km_type)
|
||||
+{
|
||||
+ int res = 0;
|
||||
+ struct scatterlist *dst_sg;
|
||||
+ size_t src_len, dst_len, src_offs, dst_offs;
|
||||
+ struct page *src_page, *dst_page;
|
||||
+
|
||||
+ dst_sg = *pdst_sg;
|
||||
+ dst_len = *pdst_len;
|
||||
+ dst_offs = *pdst_offs;
|
||||
+ dst_page = sg_page(dst_sg);
|
||||
+
|
||||
+ src_page = sg_page(src_sg);
|
||||
+ src_len = src_sg->length;
|
||||
+ src_offs = src_sg->offset;
|
||||
+
|
||||
+ do {
|
||||
+ void *saddr, *daddr;
|
||||
+ size_t n;
|
||||
+
|
||||
+ saddr = kmap_atomic(src_page +
|
||||
+ (src_offs >> PAGE_SHIFT), s_km_type) +
|
||||
+ (src_offs & ~PAGE_MASK);
|
||||
+ daddr = kmap_atomic(dst_page +
|
||||
+ (dst_offs >> PAGE_SHIFT), d_km_type) +
|
||||
+ (dst_offs & ~PAGE_MASK);
|
||||
+
|
||||
+ if (((src_offs & ~PAGE_MASK) == 0) &&
|
||||
+ ((dst_offs & ~PAGE_MASK) == 0) &&
|
||||
+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
|
||||
+ (copy_len >= PAGE_SIZE)) {
|
||||
+ copy_page(daddr, saddr);
|
||||
+ n = PAGE_SIZE;
|
||||
+ } else {
|
||||
+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
|
||||
+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
|
||||
+ n = min(n, src_len);
|
||||
+ n = min(n, dst_len);
|
||||
+ n = min_t(size_t, n, copy_len);
|
||||
+ memcpy(daddr, saddr, n);
|
||||
+ }
|
||||
+ dst_offs += n;
|
||||
+ src_offs += n;
|
||||
+
|
||||
+ kunmap_atomic(saddr, s_km_type);
|
||||
+ kunmap_atomic(daddr, d_km_type);
|
||||
+
|
||||
+ res += n;
|
||||
+ copy_len -= n;
|
||||
+ if (copy_len == 0)
|
||||
+ goto out;
|
||||
+
|
||||
+ src_len -= n;
|
||||
+ dst_len -= n;
|
||||
+ if (dst_len == 0) {
|
||||
+ dst_sg = sg_next(dst_sg);
|
||||
+ if (dst_sg == NULL)
|
||||
+ goto out;
|
||||
+ dst_page = sg_page(dst_sg);
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+ }
|
||||
+ } while (src_len > 0);
|
||||
+
|
||||
+out:
|
||||
+ *pdst_sg = dst_sg;
|
||||
+ *pdst_len = dst_len;
|
||||
+ *pdst_offs = dst_offs;
|
||||
+ return res;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * sg_copy - copy one SG vector to another
|
||||
+ * @dst_sg: destination SG
|
||||
+ * @src_sg: source SG
|
||||
+ * @nents_to_copy: maximum number of entries to copy
|
||||
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
|
||||
+ * @d_km_type: kmap_atomic type for the destination SG
|
||||
+ * @s_km_type: kmap_atomic type for the source SG
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data from the source SG vector will be copied to the destination SG
|
||||
+ * vector. End of the vectors will be determined by sg_next() returning
|
||||
+ * NULL. Returns number of bytes copied.
|
||||
+ */
|
||||
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
|
||||
+ int nents_to_copy, size_t copy_len,
|
||||
+ enum km_type d_km_type, enum km_type s_km_type)
|
||||
+{
|
||||
+ int res = 0;
|
||||
+ size_t dst_len, dst_offs;
|
||||
+
|
||||
+ if (copy_len == 0)
|
||||
+ copy_len = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ if (nents_to_copy == 0)
|
||||
+ nents_to_copy = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+
|
||||
+ do {
|
||||
+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
|
||||
+ src_sg, copy_len, d_km_type, s_km_type);
|
||||
+ copy_len -= copied;
|
||||
+ res += copied;
|
||||
+ if ((copy_len == 0) || (dst_sg == NULL))
|
||||
+ goto out;
|
||||
+
|
||||
+ nents_to_copy--;
|
||||
+ if (nents_to_copy == 0)
|
||||
+ goto out;
|
||||
+
|
||||
+ src_sg = sg_next(src_sg);
|
||||
+ } while (src_sg != NULL);
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(sg_copy);
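Taken together, the exported pair above is meant to be driven in a map-then-execute sequence roughly like the sketch below. The request setup details (the blk_get_request() arguments, REQ_TYPE_BLOCK_PC, the NULL gendisk, the omitted CDB setup) are assumptions in the style of the 2.6/3.x block API, not taken from the patch itself; note that blk_rq_unmap_kern_sg() is reserved for error paths, since normal completion releases the bios through blk_bio_map_kern_endio():

/* Hedged sketch: map a kernel SG vector into a SCSI pass-through
 * request and execute it synchronously. CDB setup omitted. */
static int exec_pc_request(struct request_queue *q,
			   struct scatterlist *sgl, int nents)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res == 0)
		res = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return res;
}
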

@@ -1,527 +0,0 @@

=== modified file 'block/blk-map.c'
--- old/block/blk-map.c 2013-07-23 02:45:53 +0000
+++ new/block/blk-map.c 2013-07-23 02:50:11 +0000
@@ -5,6 +5,8 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <scsi/sg.h> /* for struct sg_iovec */

#include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
}
EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h 2013-07-23 02:45:53 +0000
+++ new/include/linux/blkdev.h 2013-07-23 02:50:11 +0000
@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h 2013-07-23 02:45:53 +0000
+++ new/include/linux/scatterlist.h 2013-07-23 02:50:11 +0000
@@ -8,6 +8,7 @@
#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/kmap_types.h>

struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -244,6 +245,9 @@ size_t sg_copy_from_buffer(struct scatte
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c 2013-07-23 02:45:53 +0000
+++ new/lib/scatterlist.c 2013-07-23 02:50:11 +0000
@@ -627,3 +627,126 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
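The 2013 variants of the patch (this one and the two that follow) are functionally the same as the 3.1 version above; the visible difference is that kmap_atomic()/kunmap_atomic() no longer take a km_type slot on those kernels, so sg_copy() and sg_copy_elem() lose their last two parameters. A call site sketch under that assumption (helper name illustrative, as before):

/* Same "copy all" call as in the 3.1 variant, minus the kmap slots. */
static int copy_whole_vector(struct sg_table *dst, struct sg_table *src)
{
	return sg_copy(dst->sgl, src->sgl, 0, 0);
}
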

@@ -1,528 +0,0 @@

=== modified file 'block/blk-map.c'
--- old/block/blk-map.c 2013-09-28 00:14:38 +0000
+++ new/block/blk-map.c 2013-09-28 00:23:26 +0000
@@ -5,6 +5,8 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <scsi/sg.h> /* for struct sg_iovec */

#include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
}
EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h 2013-09-28 00:14:38 +0000
+++ new/include/linux/blkdev.h 2013-09-28 00:23:26 +0000
@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h 2013-09-28 00:14:38 +0000
+++ new/include/linux/scatterlist.h 2013-09-28 00:23:26 +0000
@@ -8,6 +8,7 @@
#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/kmap_types.h>

struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c 2013-09-28 00:14:38 +0000
+++ new/lib/scatterlist.c 2013-09-28 00:23:26 +0000
@@ -716,3 +716,127 @@ size_t sg_pcopy_to_buffer(struct scatter
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
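A detail shared by all of these variants is the completion scheme in blk_bio_map_kern_endio(): every bio built for the request holds one reference on the shared blk_kern_sg_work, and atomic_dec_and_test() elects the last completing bio to copy READ data back and free the bounce table exactly once. The same pattern in isolation, as a sketch with an invented work struct:

/* N completions share one work item; only the last one finalizes. */
struct shared_work {
	atomic_t inflight;	/* set to N before the first submission */
};

static void complete_one(struct shared_work *w)
{
	if (atomic_dec_and_test(&w->inflight))
		kfree(w);	/* last completer runs the finalizer */
}
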

@@ -1,528 +0,0 @@

=== modified file 'block/blk-map.c'
--- old/block/blk-map.c 2013-11-30 00:34:22 +0000
+++ new/block/blk-map.c 2013-11-30 00:39:53 +0000
@@ -5,6 +5,8 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <scsi/sg.h> /* for struct sg_iovec */

#include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
}
EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags); /* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq: request to fill
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq: request to unmap
+ * @err: non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h 2013-11-30 00:34:22 +0000
+++ new/include/linux/blkdev.h 2013-11-30 00:39:53 +0000
@@ -676,6 +676,8 @@ extern unsigned long blk_max_low_pfn, bl
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -795,6 +797,9 @@ extern int blk_rq_map_kern(struct reques
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h 2013-11-30 00:34:22 +0000
+++ new/include/linux/scatterlist.h 2013-11-30 00:39:53 +0000
@@ -8,6 +8,7 @@
#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/kmap_types.h>

struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c 2013-11-30 00:34:22 +0000
+++ new/lib/scatterlist.c 2013-11-30 00:39:53 +0000
@@ -717,3 +717,127 @@ size_t sg_pcopy_to_buffer(struct scatter
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
|
||||
+ if (((src_offs & ~PAGE_MASK) == 0) &&
|
||||
+ ((dst_offs & ~PAGE_MASK) == 0) &&
|
||||
+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
|
||||
+ (copy_len >= PAGE_SIZE)) {
|
||||
+ copy_page(daddr, saddr);
|
||||
+ n = PAGE_SIZE;
|
||||
+ } else {
|
||||
+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
|
||||
+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
|
||||
+ n = min(n, src_len);
|
||||
+ n = min(n, dst_len);
|
||||
+ n = min_t(size_t, n, copy_len);
|
||||
+ memcpy(daddr, saddr, n);
|
||||
+ }
|
||||
+ dst_offs += n;
|
||||
+ src_offs += n;
|
||||
+
|
||||
+ kunmap_atomic(saddr);
|
||||
+ kunmap_atomic(daddr);
|
||||
+
|
||||
+ res += n;
|
||||
+ copy_len -= n;
|
||||
+ if (copy_len == 0)
|
||||
+ goto out;
|
||||
+
|
||||
+ src_len -= n;
|
||||
+ dst_len -= n;
|
||||
+ if (dst_len == 0) {
|
||||
+ dst_sg = sg_next(dst_sg);
|
||||
+ if (dst_sg == NULL)
|
||||
+ goto out;
|
||||
+ dst_page = sg_page(dst_sg);
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+ }
|
||||
+ } while (src_len > 0);
|
||||
+
|
||||
+out:
|
||||
+ *pdst_sg = dst_sg;
|
||||
+ *pdst_len = dst_len;
|
||||
+ *pdst_offs = dst_offs;
|
||||
+ return res;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * sg_copy - copy one SG vector to another
|
||||
+ * @dst_sg: destination SG
|
||||
+ * @src_sg: source SG
|
||||
+ * @nents_to_copy: maximum number of entries to copy
|
||||
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data from the source SG vector will be copied to the destination SG
|
||||
+ * vector. End of the vectors will be determined by sg_next() returning
|
||||
+ * NULL. Returns number of bytes copied.
|
||||
+ */
|
||||
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
|
||||
+ int nents_to_copy, size_t copy_len)
|
||||
+{
|
||||
+ int res = 0;
|
||||
+ size_t dst_len, dst_offs;
|
||||
+
|
||||
+ if (copy_len == 0)
|
||||
+ copy_len = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ if (nents_to_copy == 0)
|
||||
+ nents_to_copy = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+
|
||||
+ do {
|
||||
+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
|
||||
+ src_sg, copy_len);
|
||||
+ copy_len -= copied;
|
||||
+ res += copied;
|
||||
+ if ((copy_len == 0) || (dst_sg == NULL))
|
||||
+ goto out;
|
||||
+
|
||||
+ nents_to_copy--;
|
||||
+ if (nents_to_copy == 0)
|
||||
+ goto out;
|
||||
+
|
||||
+ src_sg = sg_next(src_sg);
|
||||
+ } while (src_sg != NULL);
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(sg_copy);
@@ -1,524 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2014-08-19 01:00:36 +0000
+++ new/block/blk-map.c	2014-08-19 01:37:01 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */

 #include "blk.h"
@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+					  gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+					   bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2014-08-19 01:00:36 +0000
+++ new/include/linux/blkdev.h	2014-08-19 01:06:48 +0000
@@ -735,6 +735,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -856,6 +858,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct sg_iovec *,
			       int, unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2014-08-19 01:00:36 +0000
+++ new/include/linux/scatterlist.h	2014-08-19 01:06:48 +0000
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>

 struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2014-08-19 01:00:36 +0000
+++ new/lib/scatterlist.c	2014-08-19 01:06:48 +0000
@@ -718,3 +718,127 @@ size_t sg_pcopy_to_buffer(struct scatter
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
 }
 EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
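
To see how the two block-layer exports above fit together: blk_rq_map_kern_sg() builds and appends the bios for a kernel SG list (falling back to a bounce copy when direct mapping fails), while blk_rq_unmap_kern_sg() exists purely for error paths before the request is started. A hypothetical synchronous caller, sketched against the block-layer API of that kernel generation (the function name and surrounding setup are invented for illustration, not taken from SCST):

	#include <linux/blkdev.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	static int example_exec_sg(struct request_queue *q, struct gendisk *disk,
				   struct scatterlist *sgl, int nents, gfp_t gfp)
	{
		struct request *rq;
		int res;

		rq = blk_get_request(q, READ, gfp);
		if (rq == NULL)
			return -ENOMEM;
		rq->cmd_type = REQ_TYPE_BLOCK_PC;

		/* On failure the mapping path has already torn down its bios */
		res = blk_rq_map_kern_sg(rq, sgl, nents, gfp);
		if (res != 0) {
			blk_put_request(rq);
			return res;
		}

		/* Synchronous execution; completion frees the mapped bios */
		res = blk_execute_rq(q, disk, rq, 0);

		blk_put_request(rq);
		return res;
	}
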
@@ -1,524 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2014-11-21 03:17:49 +0000
+++ new/block/blk-map.c	2014-11-21 03:43:00 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */

 #include "blk.h"
@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+					  gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+					   bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2014-11-21 03:17:49 +0000
+++ new/include/linux/blkdev.h	2014-11-21 03:43:00 +0000
@@ -737,6 +737,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -858,6 +860,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct sg_iovec *,
			       int, unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2014-11-21 03:17:49 +0000
+++ new/include/linux/scatterlist.h	2014-11-21 03:43:00 +0000
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>

 struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatt
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2014-11-21 03:17:49 +0000
+++ new/lib/scatterlist.c	2014-11-21 03:43:00 +0000
@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatter
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
 }
 EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
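
One detail of sg_copy_elem() in the hunk above is easy to miss: a single SG element may span several physically contiguous pages, so before each kmap_atomic() the element's running offset is split into a page index (offs >> PAGE_SHIFT) and an in-page offset (offs & ~PAGE_MASK). A small stand-alone illustration of that split (hypothetical helper, not from the patch):

	#include <linux/mm.h>	/* PAGE_SHIFT, PAGE_MASK */

	/*
	 * For a running byte offset into a multi-page SG element, return
	 * the page that holds the byte and store the offset inside it.
	 * With 4 KiB pages, offs = 0x2340 gives page index 2, offset 0x340.
	 */
	static inline struct page *example_split_offs(struct page *first_page,
						      size_t offs,
						      size_t *in_page_offs)
	{
		*in_page_offs = offs & ~PAGE_MASK;
		return first_page + (offs >> PAGE_SHIFT);
	}

This relies on the pages behind one SG element forming a contiguous struct page array, which is the same assumption sg_copy_elem() itself makes when it computes src_page + (src_offs >> PAGE_SHIFT).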
@@ -1,536 +0,0 @@
Subject: [PATCH] scst_exec_req_fifo

---
 block/blk-map.c             | 329 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h      |   5 +
 include/linux/scatterlist.h |   4 +
 lib/scatterlist.c           | 124 +++++++++++++++++
 4 files changed, 462 insertions(+)

diff --git a/block/blk-map.c b/block/blk-map.c
index f890d43..d4b8509 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */

 #include "blk.h"
@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+					  gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+					   bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aac0f9e..5cd3afa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -731,6 +731,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -852,6 +854,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct sg_iovec *,
			       int, unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ed8f9e7..f64e02f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>

 struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c9f2e8c..ba693d1 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
 }
 EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF; /* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
--
2.1.2
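
The final copy of the patch below targets older kernels whose kmap_atomic()/kunmap_atomic() still took an explicit km_type slot argument, which is why sg_copy() there grows two extra enum km_type parameters. Schematically, the same page access under both API generations (illustrative fragment only; neither function is part of the patches, and the exact kernel versions where the slotless API became mandatory are not spelled out here):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Older kernels: the caller names the per-CPU mapping slot. */
	static void example_zero_page_old(struct page *pg)
	{
		void *addr = kmap_atomic(pg, KM_USER0);
		memset(addr, 0, PAGE_SIZE);
		kunmap_atomic(addr, KM_USER0);
	}

	/* Newer kernels: the mapping slot is managed implicitly. */
	static void example_zero_page_new(struct page *pg)
	{
		void *addr = kmap_atomic(pg);
		memset(addr, 0, PAGE_SIZE);
		kunmap_atomic(addr);
	}

Only one of the two forms compiles against a given kernel, which is the practical reason the repository carried per-kernel-version copies of this patch at all.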
@@ -1,536 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2012-01-10 22:58:17 +0000
+++ new/block/blk-map.c	2012-01-10 23:01:21 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */

 #include "blk.h"
@@ -275,6 +277,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+					  gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+					   bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2012-01-10 22:58:17 +0000
+++ new/include/linux/blkdev.h	2012-01-10 23:01:21 +0000
@@ -599,6 +599,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -716,6 +718,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2012-01-10 22:58:17 +0000
+++ new/include/linux/scatterlist.h	2012-01-10 23:01:21 +0000
@@ -3,6 +3,7 @@

 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2012-01-10 22:58:17 +0000
+++ new/lib/scatterlist.c	2012-01-10 23:01:21 +0000
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+ }
|
||||
+ } while (src_len > 0);
|
||||
+
|
||||
+out:
|
||||
+ *pdst_sg = dst_sg;
|
||||
+ *pdst_len = dst_len;
|
||||
+ *pdst_offs = dst_offs;
|
||||
+ return res;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * sg_copy - copy one SG vector to another
|
||||
+ * @dst_sg: destination SG
|
||||
+ * @src_sg: source SG
|
||||
+ * @nents_to_copy: maximum number of entries to copy
|
||||
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
|
||||
+ * @d_km_type: kmap_atomic type for the destination SG
|
||||
+ * @s_km_type: kmap_atomic type for the source SG
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data from the source SG vector will be copied to the destination SG
|
||||
+ * vector. End of the vectors will be determined by sg_next() returning
|
||||
+ * NULL. Returns number of bytes copied.
|
||||
+ */
|
||||
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
|
||||
+ int nents_to_copy, size_t copy_len,
|
||||
+ enum km_type d_km_type, enum km_type s_km_type)
|
||||
+{
|
||||
+ int res = 0;
|
||||
+ size_t dst_len, dst_offs;
|
||||
+
|
||||
+ if (copy_len == 0)
|
||||
+ copy_len = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ if (nents_to_copy == 0)
|
||||
+ nents_to_copy = 0x7FFFFFFF; /* copy all */
|
||||
+
|
||||
+ dst_len = dst_sg->length;
|
||||
+ dst_offs = dst_sg->offset;
|
||||
+
|
||||
+ do {
|
||||
+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
|
||||
+ src_sg, copy_len, d_km_type, s_km_type);
|
||||
+ copy_len -= copied;
|
||||
+ res += copied;
|
||||
+ if ((copy_len == 0) || (dst_sg == NULL))
|
||||
+ goto out;
|
||||
+
|
||||
+ nents_to_copy--;
|
||||
+ if (nents_to_copy == 0)
|
||||
+ goto out;
|
||||
+
|
||||
+ src_sg = sg_next(src_sg);
|
||||
+ } while (src_sg != NULL);
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(sg_copy);
|
||||
|
||||
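For orientation, here is a minimal sketch (not from the SCST source; names prefixed example_ are hypothetical) of how the two entry points exported above are meant to be combined on a kernel of this generation. Request setup follows the usual REQ_TYPE_BLOCK_PC pass-through conventions, and note the rule from the comment block above: blk_rq_unmap_kern_sg() is only for the case where the request fails before it is started.

    /* Illustrative sketch only: issue a CDB whose data is described by a
     * kernel scatterlist through the interface added by this patch. */
    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static int example_exec_cdb(struct request_queue *q, unsigned char *cdb,
                                unsigned int cdb_len, struct scatterlist *sgl,
                                int nents, int dir_write)
    {
            struct request *rq;
            int res;

            rq = blk_get_request(q, dir_write ? WRITE : READ, GFP_KERNEL);
            if (rq == NULL)
                    return -ENOMEM;

            rq->cmd_type = REQ_TYPE_BLOCK_PC;
            rq->cmd_len = cdb_len;
            memcpy(rq->cmd, cdb, cdb_len);
            rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

            res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
            if (res != 0)
                    goto out_put;   /* nothing mapped, nothing to unmap */

            /* Had anything failed between mapping and starting the request,
             * blk_rq_unmap_kern_sg(rq, res) would be the cleanup call. Once
             * the request executes, its bios complete via their end_io
             * handlers instead. */
            res = blk_execute_rq(q, NULL, rq, 0);

    out_put:
            blk_put_request(rq);
            return res;
    }
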
@@ -1,536 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2012-03-19 23:46:01 +0000
+++ new/block/blk-map.c	2012-03-20 00:10:37 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>	/* for struct sg_iovec */

 #include "blk.h"
@@ -275,6 +277,339 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+					KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy,
+			KM_USER0, KM_USER1);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:	request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:	request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:	request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2012-03-19 23:46:01 +0000
+++ new/include/linux/blkdev.h	2012-03-20 00:10:37 +0000
@@ -612,6 +612,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -731,6 +733,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2012-03-19 23:46:01 +0000
+++ new/include/linux/scatterlist.h	2012-03-20 00:10:37 +0000
@@ -3,6 +3,7 @@

 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2012-03-19 23:46:01 +0000
+++ new/lib/scatterlist.c	2012-03-20 00:10:37 +0000
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len,
+			enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page +
+			(src_offs >> PAGE_SHIFT), s_km_type) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page +
+			(dst_offs >> PAGE_SHIFT), d_km_type) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr, s_km_type);
+		kunmap_atomic(daddr, d_km_type);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy:	maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type:	kmap_atomic type for the destination SG
+ * @s_km_type:	kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len,
+	    enum km_type d_km_type, enum km_type s_km_type)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len, d_km_type, s_km_type);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);

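As a quick orientation to the sg_copy() interface that this km_type variant of the patch exports, here is a hedged, stand-alone sketch (example_sg_clone is a hypothetical name, not SCST code): duplicating the data of one scatterlist into another from process context. KM_USER0/KM_USER1 are the standard process-context kmap_atomic slots on kernels of this vintage.

    #include <linux/scatterlist.h>

    /* Sketch only: copy everything src describes into dst. Passing 0 for
     * both limits means "no entry limit, no byte limit"; the copy stops
     * when either vector runs out (sg_next() returns NULL). */
    static int example_sg_clone(struct scatterlist *dst, struct scatterlist *src)
    {
            return sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);
    }
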
@@ -1,528 +0,0 @@
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd..20349d0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>	/* for struct sg_iovec */

 #include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:	request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:	request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:	request queue where request should be inserted
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d4ac24..3fa6a30 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -609,6 +609,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -728,6 +730,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ac9586d..4b743d7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>

 struct sg_table {
 	struct scatterlist *sgl;	/* the list */
@@ -220,6 +221,9 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 6096e89..1786ca9 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -517,3 +517,126 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy:	maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);

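The variant above and the two below drop the km_type arguments, matching kernels whose kmap_atomic()/kunmap_atomic() take only the page pointer. Independent of that difference, the segment-alignment rule that __blk_rq_map_kern_sg() enforces in every variant can be stated as a stand-alone predicate. The helper below is a hypothetical restatement for illustration, not part of the patch:

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* Hypothetical restatement of the check in __blk_rq_map_kern_sg():
     * every element's offset, and every element's length except the
     * last, must satisfy the queue's DMA alignment mask. */
    static bool example_sg_dma_aligned(struct request_queue *q,
                                       struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, nents, i) {
                    size_t l = (i == nents - 1) ? 0 : sg->length;

                    if ((sg->offset | l) & queue_dma_alignment(q))
                            return false;
            }
            return true;
    }
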
@@ -1,527 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2012-08-08 02:57:29 +0000
+++ new/block/blk-map.c	2012-08-08 03:02:56 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>	/* for struct sg_iovec */

 #include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:	request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+				gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+				bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:	request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:	request queue where request should be inserted

=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2012-08-08 02:57:29 +0000
+++ new/include/linux/blkdev.h	2012-08-08 03:02:56 +0000
@@ -627,6 +627,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2012-08-08 02:57:29 +0000
+++ new/include/linux/scatterlist.h	2012-08-08 03:02:56 +0000
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>

 struct sg_table {
 	struct scatterlist *sgl;	/* the list */
@@ -220,6 +221,9 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2012-08-08 02:57:29 +0000
+++ new/lib/scatterlist.c	2012-08-08 03:02:56 +0000
@@ -517,3 +517,126 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy:	maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+				src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);

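One more variant of the same patch follows (2012-10-01; the dump is cut off partway through it). Before it, a note on the fallback path shared by all variants: when direct mapping fails, blk_rq_copy_kern_sg() bounces the data through freshly allocated whole pages, one scatterlist entry per page, so the entry count is simply PFN_UP of the total length. The tiny helper below (hypothetical, for illustration only) makes that arithmetic explicit:

    #include <linux/mm.h>   /* PFN_UP, PAGE_SIZE */

    /* Sketch: number of bounce pages (and scatterlist entries) the copy
     * fallback allocates, e.g. 9 KiB with 4 KiB pages -> 3 entries. */
    static inline int example_bounce_nents(size_t total_len)
    {
            return PFN_UP(total_len);
    }
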
@@ -1,527 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2012-10-01 18:39:34 +0000
+++ new/block/blk-map.c	2012-10-01 20:50:07 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>	/* for struct sg_iovec */

 #include "blk.h"
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if ((bio_data_dir(bio) == READ) && (err == 0)) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+			       int nents, struct blk_kern_sg_work **pbw,
+			       gfp_t gfp, gfp_t page_gfp)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	bw = kzalloc(sizeof(*bw), gfp);
+	if (bw == NULL)
+		goto out;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+	if (res != 0)
+		goto err_free;
+
+	new_sgl = bw->sg_table.sgl;
+
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(page_gfp);
+		if (pg == NULL)
+			goto err_free;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * We need to limit amount of copied data to to_copy, because
+		 * sgl might have the last element in sgl not marked as last in
+		 * SG chaining.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy);
+	}
+
+	*pbw = bw;
+	/*
+	 * REQ_COPY_USER name is misleading. It should be something like
+	 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+	 */
+	rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+	return res;
+
+err_free:
+	blk_free_kern_sg_work(bw);
+	res = -ENOMEM;
+	goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+				int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+	int res;
+	struct request_queue *q = rq->q;
+	int rw = rq_data_dir(rq);
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg, *prev_sg = NULL;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+		WARN_ON(1);
+		res = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Let's keep each bio allocation inside a single page to decrease
+	 * probability of failure.
+	 */
+	max_nr_vecs = min_t(size_t,
+		((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+		prev_sg = sg;
+
+		/*
+		 * Each segment must be aligned on DMA boundary and
+		 * not on stack. The last one may have unaligned
+		 * length as long as the total length is aligned to
+		 * DMA padding alignment.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			res = -EINVAL;
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+				bio = bio_kmalloc(gfp, max_nr_vecs);
+				if (bio == NULL) {
+					res = -ENOMEM;
+					goto out_free_bios;
+				}
+
+				if (rw == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || (rc < 0))) {
+					if (rc < 0)
+						res = rc;
+					else
+						res = -EIO;
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+					continue;
+				}
+			}
+
+			need_new_bio = false;
+			offset = 0;
+			len -= bytes;
+			page = nth_page(page, 1);
+		}
+	}
+
+	if (hbio == NULL) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	/* Total length must be aligned on DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) &&
+	    !(rq->cmd_flags & REQ_COPY_USER)) {
+		res = -EINVAL;
+		goto out_free_bios;
+	}
+
+	if (bw != NULL)
+		atomic_set(&bw->bios_inflight, bios);
+
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+
+		blk_queue_bounce(q, &bio);
+
+		res = blk_rq_append_bio(q, rq, bio);
+		if (unlikely(res != 0)) {
+			bio->bi_next = hbio;
+			hbio = bio;
+			/* We can have one or more bios bounced */
+			goto out_unmap_bios;
+		}
+	}
+
+	res = 0;
+
+	rq->buffer = NULL;
+out:
+	return res;
+
+out_unmap_bios:
+	blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:		request to fill
+ * @sgl:	area to map
+ * @nents:	number of elements in @sgl
+ * @gfp:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+		       int nents, gfp_t gfp)
+{
+	int res;
+
+	res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+	if (unlikely(res != 0)) {
+		struct blk_kern_sg_work *bw = NULL;
+
+		res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+					  gfp, rq->q->bounce_gfp | gfp);
+		if (unlikely(res != 0))
+			goto out;
+
+		res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+					   bw->sg_table.nents, bw, gfp);
+		if (res != 0) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+	rq->buffer = NULL;
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:		request to unmap
+ * @err:	non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+	struct bio *bio = rq->bio;
+
+	while (bio) {
+		struct bio *b = bio;
+		bio = bio->bi_next;
+		b->bi_end_io(b, err);
+	}
+	rq->bio = NULL;
+
+	return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted

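For orientation, here is a minimal, hypothetical sketch of how a pass-through
backend could drive the two exports added by the blk-map.c hunk above on a
kernel carrying this patch. scst_pt_exec() and all of its parameters are
invented for illustration; this is not SCST's actual scst_scsi_exec_async()
path. Only blk_rq_map_kern_sg() and blk_rq_unmap_kern_sg() come from the
patch; everything else is the stock block-layer API of this kernel era.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#ifdef SCSI_EXEC_REQ_FIFO_DEFINED	/* provided by the blkdev.h hunk below */
static int scst_pt_exec(struct request_queue *q, struct gendisk *disk,
			const unsigned char *cdb, unsigned int cdb_len,
			struct scatterlist *sgl, int nents, int write)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	/* Map the SG vector directly, or via a hidden bounce copy */
	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res != 0)
		goto out_put;

	if (cdb_len > BLK_MAX_CDB) {
		/*
		 * Mapped but never executed: the one legal use of
		 * blk_rq_unmap_kern_sg().
		 */
		blk_rq_unmap_kern_sg(rq, -EINVAL);
		res = -EINVAL;
		goto out_put;
	}
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	res = blk_execute_rq(q, disk, rq, 0);	/* synchronous */

out_put:
	blk_put_request(rq);
	return res;
}
#endif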
=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2012-10-01 18:39:34 +0000
+++ new/include/linux/blkdev.h	2012-10-01 18:45:47 +0000
@@ -638,6 +638,8 @@ extern unsigned long blk_max_low_pfn, bl
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)

+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -757,6 +759,9 @@ extern int blk_rq_map_kern(struct reques
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct rq_map_data *, struct sg_iovec *, int,
 			       unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+			      int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,

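Out-of-tree code can use the SCSI_EXEC_REQ_FIFO_DEFINED guard added above to
verify at build time that the kernel headers carry this patch; a sketch of
such a feature test (the error text is invented):

#include <linux/blkdev.h>

#ifndef SCSI_EXEC_REQ_FIFO_DEFINED
#error "kernel not patched: apply scst_exec_req_fifo-<kver>.patch first"
#endif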
=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2012-10-01 18:39:34 +0000
+++ new/include/linux/scatterlist.h	2012-10-01 18:45:47 +0000
@@ -8,6 +8,7 @@
 #include <asm/types.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/kmap_types.h>

 struct sg_table {
 	struct scatterlist *sgl;	/* the list */
@@ -224,6 +225,9 @@ size_t sg_copy_from_buffer(struct scatte
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2012-10-01 18:39:34 +0000
+++ new/lib/scatterlist.c	2012-10-01 20:50:07 +0000
@@ -573,3 +573,126 @@ size_t sg_copy_to_buffer(struct scatterl
 	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:	destination SG
+ * @src_sg:	source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:	maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+					  src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);
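A self-contained toy sketch of the sg_copy() semantics defined above: passing
0 for either limit argument means "no limit", and the return value is the
number of bytes actually copied. The function name and buffer sizes are
invented for illustration.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Copy two pages of data between two flat (unchained) SG vectors. */
static int sg_copy_demo(void)
{
	struct scatterlist src[2], dst[2];
	struct page *pages[4] = { NULL, NULL, NULL, NULL };
	int i, copied, res = 0;

	sg_init_table(src, 2);
	sg_init_table(dst, 2);

	for (i = 0; i < 4; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (pages[i] == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
	}
	for (i = 0; i < 2; i++) {
		sg_set_page(&src[i], pages[i], PAGE_SIZE, 0);
		sg_set_page(&dst[i], pages[i + 2], PAGE_SIZE, 0);
	}

	/* 0 and 0: copy until either vector ends */
	copied = sg_copy(dst, src, 0, 0);
	if (copied != 2 * PAGE_SIZE)
		res = -EIO;

out_free:
	for (i = 0; i < 4; i++)
		if (pages[i] != NULL)
			__free_page(pages[i]);
	return res;
}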

@@ -1,527 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2012-12-17 19:41:04 +0000
+++ new/block/blk-map.c	2012-12-17 22:29:54 +0000
=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2012-12-17 19:41:04 +0000
+++ new/include/linux/blkdev.h	2012-12-17 22:29:54 +0000
=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2012-12-17 19:41:04 +0000
+++ new/include/linux/scatterlist.h	2012-12-17 22:29:54 +0000
=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2012-12-17 19:41:04 +0000
+++ new/lib/scatterlist.c	2012-12-17 22:29:54 +0000
[This revision's hunks are byte-for-byte identical to the 2012-10-01 patch above; only the timestamps and hunk offsets differ.]

@@ -1,527 +0,0 @@
=== modified file 'block/blk-map.c'
--- old/block/blk-map.c	2013-02-22 21:12:31 +0000
+++ new/block/blk-map.c	2013-02-23 00:07:57 +0000
=== modified file 'include/linux/blkdev.h'
--- old/include/linux/blkdev.h	2013-02-22 21:12:31 +0000
+++ new/include/linux/blkdev.h	2013-02-22 21:21:51 +0000
=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2013-02-22 21:12:31 +0000
+++ new/include/linux/scatterlist.h	2013-02-22 21:21:51 +0000
=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2013-02-22 21:12:31 +0000
+++ new/lib/scatterlist.c	2013-02-23 00:07:57 +0000
[This revision's hunks are byte-for-byte identical to the 2012-10-01 patch above; only the timestamps and hunk offsets differ.]
||||
|
||||
@@ -1,527 +0,0 @@
|
||||
=== modified file 'block/blk-map.c'
|
||||
--- old/block/blk-map.c 2013-05-11 05:39:14 +0000
|
||||
+++ new/block/blk-map.c 2013-05-14 01:25:01 +0000
|
||||
@@ -5,6 +5,8 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
+#include <linux/scatterlist.h>
|
||||
+#include <linux/slab.h>
|
||||
#include <scsi/sg.h> /* for struct sg_iovec */
|
||||
|
||||
#include "blk.h"
|
||||
@@ -275,6 +277,337 @@ int blk_rq_unmap_user(struct bio *bio)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_rq_unmap_user);
|
||||
|
||||
+struct blk_kern_sg_work {
|
||||
+ atomic_t bios_inflight;
|
||||
+ struct sg_table sg_table;
|
||||
+ struct scatterlist *src_sgl;
|
||||
+};
|
||||
+
|
||||
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
|
||||
+{
|
||||
+ struct sg_table *sgt = &bw->sg_table;
|
||||
+ struct scatterlist *sg;
|
||||
+ int i;
|
||||
+
|
||||
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
|
||||
+ struct page *pg = sg_page(sg);
|
||||
+ if (pg == NULL)
|
||||
+ break;
|
||||
+ __free_page(pg);
|
||||
+ }
|
||||
+
|
||||
+ sg_free_table(sgt);
|
||||
+ kfree(bw);
|
||||
+ return;
|
||||
+}
|
||||
+
|
||||
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
|
||||
+{
|
||||
+ struct blk_kern_sg_work *bw = bio->bi_private;
|
||||
+
|
||||
+ if (bw != NULL) {
|
||||
+ /* Decrement the bios in processing and, if zero, free */
|
||||
+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
|
||||
+ if (atomic_dec_and_test(&bw->bios_inflight)) {
|
||||
+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
|
||||
+ unsigned long flags;
|
||||
+
|
||||
+ local_irq_save(flags); /* to protect KMs */
|
||||
+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0);
|
||||
+ local_irq_restore(flags);
|
||||
+ }
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ bio_put(bio);
|
||||
+ return;
|
||||
+}
|
||||
+
|
||||
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, struct blk_kern_sg_work **pbw,
|
||||
+ gfp_t gfp, gfp_t page_gfp)
|
||||
+{
|
||||
+ int res = 0, i;
|
||||
+ struct scatterlist *sg;
|
||||
+ struct scatterlist *new_sgl;
|
||||
+ int new_sgl_nents;
|
||||
+ size_t len = 0, to_copy;
|
||||
+ struct blk_kern_sg_work *bw;
|
||||
+
|
||||
+ bw = kzalloc(sizeof(*bw), gfp);
|
||||
+ if (bw == NULL)
|
||||
+ goto out;
|
||||
+
|
||||
+ bw->src_sgl = sgl;
|
||||
+
|
||||
+ for_each_sg(sgl, sg, nents, i)
|
||||
+ len += sg->length;
|
||||
+ to_copy = len;
|
||||
+
|
||||
+ new_sgl_nents = PFN_UP(len);
|
||||
+
|
||||
+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
|
||||
+ if (res != 0)
|
||||
+ goto err_free;
|
||||
+
|
||||
+ new_sgl = bw->sg_table.sgl;
|
||||
+
|
||||
+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
|
||||
+ struct page *pg;
|
||||
+
|
||||
+ pg = alloc_page(page_gfp);
|
||||
+ if (pg == NULL)
|
||||
+ goto err_free;
|
||||
+
|
||||
+ sg_assign_page(sg, pg);
|
||||
+ sg->length = min_t(size_t, PAGE_SIZE, len);
|
||||
+
|
||||
+ len -= PAGE_SIZE;
|
||||
+ }
|
||||
+
|
||||
+ if (rq_data_dir(rq) == WRITE) {
|
||||
+ /*
|
||||
+ * We need to limit amount of copied data to to_copy, because
|
||||
+ * sgl might have the last element in sgl not marked as last in
|
||||
+ * SG chaining.
|
||||
+ */
|
||||
+ sg_copy(new_sgl, sgl, 0, to_copy);
|
||||
+ }
|
||||
+
|
||||
+ *pbw = bw;
|
||||
+ /*
|
||||
+ * REQ_COPY_USER name is misleading. It should be something like
|
||||
+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
|
||||
+ */
|
||||
+ rq->cmd_flags |= REQ_COPY_USER;
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+
|
||||
+err_free:
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ res = -ENOMEM;
|
||||
+ goto out;
|
||||
+}
|
||||
+
|
||||
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
|
||||
+{
|
||||
+ int res;
|
||||
+ struct request_queue *q = rq->q;
|
||||
+ int rw = rq_data_dir(rq);
|
||||
+ int max_nr_vecs, i;
|
||||
+ size_t tot_len;
|
||||
+ bool need_new_bio;
|
||||
+ struct scatterlist *sg, *prev_sg = NULL;
|
||||
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
|
||||
+ int bios;
|
||||
+
|
||||
+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
|
||||
+ WARN_ON(1);
|
||||
+ res = -EINVAL;
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * Let's keep each bio allocation inside a single page to decrease
|
||||
+ * probability of failure.
|
||||
+ */
|
||||
+ max_nr_vecs = min_t(size_t,
|
||||
+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
|
||||
+ BIO_MAX_PAGES);
|
||||
+
|
||||
+ need_new_bio = true;
|
||||
+ tot_len = 0;
|
||||
+ bios = 0;
|
||||
+ for_each_sg(sgl, sg, nents, i) {
|
||||
+ struct page *page = sg_page(sg);
|
||||
+ void *page_addr = page_address(page);
|
||||
+ size_t len = sg->length, l;
|
||||
+ size_t offset = sg->offset;
|
||||
+
|
||||
+ tot_len += len;
|
||||
+ prev_sg = sg;
|
||||
+
|
||||
+ /*
|
||||
+ * Each segment must be aligned on DMA boundary and
|
||||
+ * not on stack. The last one may have unaligned
|
||||
+ * length as long as the total length is aligned to
|
||||
+ * DMA padding alignment.
|
||||
+ */
|
||||
+ if (i == nents - 1)
|
||||
+ l = 0;
|
||||
+ else
|
||||
+ l = len;
|
||||
+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
|
||||
+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ while (len > 0) {
|
||||
+ size_t bytes;
|
||||
+ int rc;
|
||||
+
|
||||
+ if (need_new_bio) {
|
||||
+ bio = bio_kmalloc(gfp, max_nr_vecs);
|
||||
+ if (bio == NULL) {
|
||||
+ res = -ENOMEM;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ if (rw == WRITE)
|
||||
+ bio->bi_rw |= REQ_WRITE;
|
||||
+
|
||||
+ bios++;
|
||||
+ bio->bi_private = bw;
|
||||
+ bio->bi_end_io = blk_bio_map_kern_endio;
|
||||
+
|
||||
+ if (hbio == NULL)
|
||||
+ hbio = tbio = bio;
|
||||
+ else
|
||||
+ tbio = tbio->bi_next = bio;
|
||||
+ }
|
||||
+
|
||||
+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
|
||||
+
|
||||
+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
|
||||
+ if (rc < bytes) {
|
||||
+ if (unlikely(need_new_bio || (rc < 0))) {
|
||||
+ if (rc < 0)
|
||||
+ res = rc;
|
||||
+ else
|
||||
+ res = -EIO;
|
||||
+ goto out_free_bios;
|
||||
+ } else {
|
||||
+ need_new_bio = true;
|
||||
+ len -= rc;
|
||||
+ offset += rc;
|
||||
+ continue;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ need_new_bio = false;
|
||||
+ offset = 0;
|
||||
+ len -= bytes;
|
||||
+ page = nth_page(page, 1);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (hbio == NULL) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ /* Total length must be aligned on DMA padding alignment */
|
||||
+ if ((tot_len & q->dma_pad_mask) &&
|
||||
+ !(rq->cmd_flags & REQ_COPY_USER)) {
|
||||
+ res = -EINVAL;
|
||||
+ goto out_free_bios;
|
||||
+ }
|
||||
+
|
||||
+ if (bw != NULL)
|
||||
+ atomic_set(&bw->bios_inflight, bios);
|
||||
+
|
||||
+ while (hbio != NULL) {
|
||||
+ bio = hbio;
|
||||
+ hbio = hbio->bi_next;
|
||||
+ bio->bi_next = NULL;
|
||||
+
|
||||
+ blk_queue_bounce(q, &bio);
|
||||
+
|
||||
+ res = blk_rq_append_bio(q, rq, bio);
|
||||
+ if (unlikely(res != 0)) {
|
||||
+ bio->bi_next = hbio;
|
||||
+ hbio = bio;
|
||||
+ /* We can have one or more bios bounced */
|
||||
+ goto out_unmap_bios;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ res = 0;
|
||||
+
|
||||
+ rq->buffer = NULL;
|
||||
+out:
|
||||
+ return res;
|
||||
+
|
||||
+out_unmap_bios:
|
||||
+ blk_rq_unmap_kern_sg(rq, res);
|
||||
+
|
||||
+out_free_bios:
|
||||
+ while (hbio != NULL) {
|
||||
+ bio = hbio;
|
||||
+ hbio = hbio->bi_next;
|
||||
+ bio_put(bio);
|
||||
+ }
|
||||
+ goto out;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
|
||||
+ * @rq: request to fill
|
||||
+ * @sgl: area to map
|
||||
+ * @nents: number of elements in @sgl
|
||||
+ * @gfp: memory allocation flags
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Data will be mapped directly if possible. Otherwise a bounce
|
||||
+ * buffer will be used.
|
||||
+ */
|
||||
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, gfp_t gfp)
|
||||
+{
|
||||
+ int res;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
|
||||
+ if (unlikely(res != 0)) {
|
||||
+ struct blk_kern_sg_work *bw = NULL;
|
||||
+
|
||||
+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
|
||||
+ gfp, rq->q->bounce_gfp | gfp);
|
||||
+ if (unlikely(res != 0))
|
||||
+ goto out;
|
||||
+
|
||||
+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
|
||||
+ bw->sg_table.nents, bw, gfp);
|
||||
+ if (res != 0) {
|
||||
+ blk_free_kern_sg_work(bw);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ rq->buffer = NULL;
|
||||
+
|
||||
+out:
|
||||
+ return res;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
|
||||
+
|
||||
+/**
|
||||
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
|
||||
+ * @rq: request to unmap
|
||||
+ * @err: non-zero error code
|
||||
+ *
|
||||
+ * Description:
|
||||
+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
|
||||
+ * only in case of an error!
|
||||
+ */
|
||||
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
|
||||
+{
|
||||
+ struct bio *bio = rq->bio;
|
||||
+
|
||||
+ while (bio) {
|
||||
+ struct bio *b = bio;
|
||||
+ bio = bio->bi_next;
|
||||
+ b->bi_end_io(b, err);
|
||||
+ }
|
||||
+ rq->bio = NULL;
|
||||
+
|
||||
+ return;
|
||||
+}
|
||||
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
|
||||
+
|
||||
/**
|
||||
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
|
||||
* @q: request queue where request should be inserted
|
||||
|
||||
=== modified file 'include/linux/blkdev.h'
|
||||
--- old/include/linux/blkdev.h 2013-05-11 05:39:14 +0000
|
||||
+++ new/include/linux/blkdev.h 2013-05-11 05:48:04 +0000
|
||||
@@ -670,6 +670,8 @@ extern unsigned long blk_max_low_pfn, bl
|
||||
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
|
||||
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
|
||||
|
||||
+#define SCSI_EXEC_REQ_FIFO_DEFINED
|
||||
+
|
||||
#ifdef CONFIG_BOUNCE
|
||||
extern int init_emergency_isa_pool(void);
|
||||
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
|
||||
@@ -789,6 +791,9 @@ extern int blk_rq_map_kern(struct reques
|
||||
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
|
||||
struct rq_map_data *, struct sg_iovec *, int,
|
||||
unsigned int, gfp_t);
|
||||
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
|
||||
+ int nents, gfp_t gfp);
|
||||
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
|
||||
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
|
||||
struct request *, int);
|
||||
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
|
||||
|
||||
=== modified file 'include/linux/scatterlist.h'
--- old/include/linux/scatterlist.h	2013-05-11 05:39:14 +0000
+++ new/include/linux/scatterlist.h	2013-05-11 05:48:04 +0000
@@ -8,6 +8,7 @@
#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/kmap_types.h>

struct sg_table {
	struct scatterlist *sgl;	/* the list */
@@ -225,6 +226,9 @@ size_t sg_copy_from_buffer(struct scatte
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen);

+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len);
+
/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.

=== modified file 'lib/scatterlist.c'
--- old/lib/scatterlist.c	2013-05-11 05:39:14 +0000
+++ new/lib/scatterlist.c	2013-05-14 01:25:01 +0000
@@ -629,3 +629,126 @@ size_t sg_copy_to_buffer(struct scatterl
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * Can switch to the next dst_sg element, so, to copy to strictly only
+ * one dst_sg element, it must be either last in the chain, or
+ * copy_len == dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+			size_t *pdst_offs, struct scatterlist *src_sg,
+			size_t copy_len)
+{
+	int res = 0;
+	struct scatterlist *dst_sg;
+	size_t src_len, dst_len, src_offs, dst_offs;
+	struct page *src_page, *dst_page;
+
+	dst_sg = *pdst_sg;
+	dst_len = *pdst_len;
+	dst_offs = *pdst_offs;
+	dst_page = sg_page(dst_sg);
+
+	src_page = sg_page(src_sg);
+	src_len = src_sg->length;
+	src_offs = src_sg->offset;
+
+	do {
+		void *saddr, *daddr;
+		size_t n;
+
+		saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) +
+			(src_offs & ~PAGE_MASK);
+		daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) +
+			(dst_offs & ~PAGE_MASK);
+
+		if (((src_offs & ~PAGE_MASK) == 0) &&
+		    ((dst_offs & ~PAGE_MASK) == 0) &&
+		    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+		    (copy_len >= PAGE_SIZE)) {
+			copy_page(daddr, saddr);
+			n = PAGE_SIZE;
+		} else {
+			n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+				  PAGE_SIZE - (src_offs & ~PAGE_MASK));
+			n = min(n, src_len);
+			n = min(n, dst_len);
+			n = min_t(size_t, n, copy_len);
+			memcpy(daddr, saddr, n);
+		}
+		dst_offs += n;
+		src_offs += n;
+
+		kunmap_atomic(saddr);
+		kunmap_atomic(daddr);
+
+		res += n;
+		copy_len -= n;
+		if (copy_len == 0)
+			goto out;
+
+		src_len -= n;
+		dst_len -= n;
+		if (dst_len == 0) {
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				goto out;
+			dst_page = sg_page(dst_sg);
+			dst_len = dst_sg->length;
+			dst_offs = dst_sg->offset;
+		}
+	} while (src_len > 0);
+
+out:
+	*pdst_sg = dst_sg;
+	*pdst_len = dst_len;
+	*pdst_offs = dst_offs;
+	return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+	    int nents_to_copy, size_t copy_len)
+{
+	int res = 0;
+	size_t dst_len, dst_offs;
+
+	if (copy_len == 0)
+		copy_len = 0x7FFFFFFF;	/* copy all */
+
+	if (nents_to_copy == 0)
+		nents_to_copy = 0x7FFFFFFF;	/* copy all */
+
+	dst_len = dst_sg->length;
+	dst_offs = dst_sg->offset;
+
+	do {
+		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+					  src_sg, copy_len);
+		copy_len -= copied;
+		res += copied;
+		if ((copy_len == 0) || (dst_sg == NULL))
+			goto out;
+
+		nents_to_copy--;
+		if (nents_to_copy == 0)
+			goto out;
+
+		src_sg = sg_next(src_sg);
+	} while (src_sg != NULL);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL(sg_copy);

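A short illustration of how sg_copy() is meant to be called; the scatterlists and sizes below are made-up example values, not taken from the patch.

	/* Illustrative sketch: two-entry source, four-entry destination. */
	struct scatterlist src[2], dst[4];
	int copied;

	sg_init_table(src, ARRAY_SIZE(src));
	sg_init_table(dst, ARRAY_SIZE(dst));
	/* ... sg_set_page()/sg_set_buf() for each entry here ... */

	/* 0 for both limits means no limit: stop when sg_next() returns NULL. */
	copied = sg_copy(dst, src, 0, 0);

	/* Copy at most 512 bytes from the first source entry only: */
	copied = sg_copy(dst, src, 1, 512);
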
@@ -47,7 +47,7 @@ static void disk_detach(struct scst_device *dev);
static int disk_parse(struct scst_cmd *cmd);
static int disk_perf_exec(struct scst_cmd *cmd);
static int disk_done(struct scst_cmd *cmd);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
static int disk_exec(struct scst_cmd *cmd);
static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd);
#endif
@@ -61,7 +61,7 @@ static struct scst_dev_type disk_devtype = {
	.attach = disk_attach,
	.detach = disk_detach,
	.parse = disk_parse,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
	.exec = disk_exec,
	.on_sg_tablesize_low = disk_on_sg_tablesize_low,
#endif
@@ -82,7 +82,7 @@ static struct scst_dev_type disk_devtype_perf = {
	.parse = disk_parse,
	.exec = disk_perf_exec,
	.dev_done = disk_done,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
	.on_sg_tablesize_low = disk_on_sg_tablesize_low,
#endif
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
@@ -293,7 +293,7 @@ static int disk_done(struct scst_cmd *cmd)
	return res;
}

-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)

static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd)
{
@@ -544,7 +544,7 @@ out_error:
	goto out_done;
}

-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */

static int disk_perf_exec(struct scst_cmd *cmd)
{

@@ -119,7 +119,8 @@ int hex_to_bin(char ch)
EXPORT_SYMBOL(hex_to_bin);
#endif

-#if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) && !defined(HAVE_SG_COPY)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || \
+    !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
static int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
		   int nents_to_copy, size_t copy_len,
@@ -6221,7 +6222,371 @@ out:
	return;
}

-#if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) && !defined(HAVE_SG_COPY)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
+struct blk_kern_sg_work {
+	atomic_t bios_inflight;
+	struct sg_table sg_table;
+	struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+	struct sg_table *sgt = &bw->sg_table;
+	struct scatterlist *sg;
+	struct page *pg;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		pg = sg_page(sg);
+		if (pg == NULL)
+			break;
+		__free_page(pg);
+	}
+
+	sg_free_table(sgt);
+	kfree(bw);
+	return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+	struct blk_kern_sg_work *bw = bio->bi_private;
+
+	if (bw != NULL) {
+		/* Decrement the bios in processing and, if zero, free */
+		BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+		if (atomic_dec_and_test(&bw->bios_inflight)) {
+			if (bio_data_dir(bio) == READ && err == 0) {
+				unsigned long flags;
+
+				local_irq_save(flags);	/* to protect KMs */
+				sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+					, KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ
+#endif
+					);
+				local_irq_restore(flags);
+			}
+			blk_free_kern_sg_work(bw);
+		}
+	}
+
+	bio_put(bio);
+	return;
+}

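The bios_inflight counter above is a shared-completion refcount: each submitted bio holds one reference, and the last bi_end_io invocation copies read data back out of the bounce pages and frees the shared state. The same idiom, reduced to a standalone sketch (all names here are illustrative, not from the patch):

	/* Illustrative sketch of the completion-refcount idiom. */
	#include <linux/atomic.h>

	struct bounce_ctx {
		atomic_t inflight;	/* one reference per submitted sub-I/O */
	};

	static void sub_io_done(struct bounce_ctx *ctx, int err)
	{
		if (atomic_dec_and_test(&ctx->inflight)) {
			/* Last completion: copy bounce data back for reads,
			 * then free the context. */
		}
	}
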
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+/*
+ * See also patch "block: Add blk_make_request(), takes bio, returns a
+ * request" (commit 79eb63e9e5875b84341a3a05f8e6ae9cdb4bb6f6).
+ */
+static struct request *blk_make_request(struct request_queue *q,
+					struct bio *bio,
+					gfp_t gfp_mask)
+{
+	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
+
+	if (unlikely(!rq))
+		return ERR_PTR(-ENOMEM);
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+	for ( ; bio; bio = bio->bi_next) {
+		struct bio *bounce_bio = bio;
+		int ret;
+
+		blk_queue_bounce(q, &bounce_bio);
+		ret = blk_rq_append_bio(q, rq, bounce_bio);
+		if (unlikely(ret)) {
+			blk_put_request(rq);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return rq;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

+/*
+ * Copy an sg-list. This function is related to bio_copy_kern() but duplicates
+ * an sg-list instead of creating a bio out of a single kernel address range.
+ */
+static struct blk_kern_sg_work *blk_copy_kern_sg(struct request_queue *q,
+	struct scatterlist *sgl, int nents, gfp_t gfp_mask, bool reading)
+{
+	int res = 0, i;
+	struct scatterlist *sg;
+	struct scatterlist *new_sgl;
+	int new_sgl_nents;
+	size_t len = 0, to_copy;
+	struct blk_kern_sg_work *bw;
+
+	res = -ENOMEM;
+	bw = kzalloc(sizeof(*bw), gfp_mask);
+	if (bw == NULL)
+		goto err;
+
+	bw->src_sgl = sgl;
+
+	for_each_sg(sgl, sg, nents, i)
+		len += sg->length;
+	to_copy = len;
+
+	new_sgl_nents = PFN_UP(len);
+
+	res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp_mask);
+	if (res != 0)
+		goto err_free_bw;
+
+	new_sgl = bw->sg_table.sgl;
+
+	res = -ENOMEM;
+	for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+		struct page *pg;
+
+		pg = alloc_page(q->bounce_gfp | gfp_mask);
+		if (pg == NULL)
+			goto err_free_table;
+
+		sg_assign_page(sg, pg);
+		sg->length = min_t(size_t, PAGE_SIZE, len);
+
+		len -= PAGE_SIZE;
+	}
+
+	if (!reading) {
+		/*
+		 * We need to limit the amount of copied data to to_copy,
+		 * because the last element of sgl might not be marked as
+		 * the last one in the SG chain.
+		 */
+		sg_copy(new_sgl, sgl, 0, to_copy
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+			, KM_USER0, KM_USER1
+#endif
+			);
+	}
+
+out:
+	return bw;
+
+err_free_table:
+	sg_free_table(&bw->sg_table);
+
+err_free_bw:
+	blk_free_kern_sg_work(bw);
+
+err:
+	sBUG_ON(res == 0);
+	bw = ERR_PTR(res);
+	goto out;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+	kfree(bio->bi_io_vec);
+	kfree(bio);
+}
+#endif

+/* __blk_map_kern_sg - map kernel data to a request for REQ_TYPE_BLOCK_PC */
+static struct request *__blk_map_kern_sg(struct request_queue *q,
+	struct scatterlist *sgl, int nents, struct blk_kern_sg_work *bw,
+	gfp_t gfp_mask, bool reading)
+{
+	struct request *rq;
+	int max_nr_vecs, i;
+	size_t tot_len;
+	bool need_new_bio;
+	struct scatterlist *sg;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	int bios;
+
+	if (unlikely(sgl == NULL || sgl->length == 0 || nents <= 0)) {
+		WARN_ON_ONCE(true);
+		rq = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	/*
+	 * Restrict bio size to a single page to minimize the probability that
+	 * bio allocation fails.
+	 */
+	max_nr_vecs = min_t(int,
+		(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
+		BIO_MAX_PAGES);
+
+	need_new_bio = true;
+	tot_len = 0;
+	bios = 0;
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page = sg_page(sg);
+		void *page_addr = page_address(page);
+		size_t len = sg->length, l;
+		size_t offset = sg->offset;
+
+		tot_len += len;
+
+		/*
+		 * Each segment must be DMA-aligned and must not reside on
+		 * the stack. The last segment may have an unaligned length
+		 * as long as the total length satisfies the DMA padding
+		 * alignment requirements.
+		 */
+		if (i == nents - 1)
+			l = 0;
+		else
+			l = len;
+		if (((sg->offset | l) & queue_dma_alignment(q)) ||
+		    (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+			rq = ERR_PTR(-EINVAL);
+			goto out_free_bios;
+		}
+
+		while (len > 0) {
+			size_t bytes;
+			int rc;
+
+			if (need_new_bio) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
+				bio = bio_alloc_bioset(gfp_mask, max_nr_vecs, NULL);
+				if (bio)
+					bio->bi_destructor =
+						bio_kmalloc_destructor;
+#else
+				bio = bio_kmalloc(gfp_mask, max_nr_vecs);
+#endif
+				if (bio == NULL) {
+					rq = ERR_PTR(-ENOMEM);
+					goto out_free_bios;
+				}
+
+				if (!reading)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+					bio->bi_rw |= 1 << BIO_RW;
+#else
+					bio->bi_rw |= REQ_WRITE;
+#endif
+				bios++;
+				bio->bi_private = bw;
+				bio->bi_end_io = blk_bio_map_kern_endio;
+
+				if (hbio == NULL)
+					hbio = bio;
+				else
+					tbio->bi_next = bio;
+				tbio = bio;
+			}
+
+			bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+			rc = bio_add_pc_page(q, bio, page, bytes, offset);
+			if (rc < bytes) {
+				if (unlikely(need_new_bio || rc < 0)) {
+					rq = ERR_PTR(rc < 0 ? rc : -EIO);
+					goto out_free_bios;
+				} else {
+					need_new_bio = true;
+					len -= rc;
+					offset += rc;
+				}
+			} else {
+				need_new_bio = false;
+				offset = 0;
+				len -= bytes;
+				page = nth_page(page, 1);
+			}
+		}
+	}
+
+	if (hbio == NULL) {
+		rq = ERR_PTR(-EINVAL);
+		goto out_free_bios;
+	}
+
+	/* Total length must satisfy DMA padding alignment */
+	if ((tot_len & q->dma_pad_mask) && bw != NULL) {
+		rq = ERR_PTR(-EINVAL);
+		goto out_free_bios;
+	}
+
+	rq = blk_make_request(q, hbio, gfp_mask);
+	if (unlikely(IS_ERR(rq)))
+		goto out_free_bios;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+	/*
+	 * See also patch "block: add blk_rq_set_block_pc()" (commit
+	 * f27b087b81b7).
+	 */
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+#endif
+
+	if (bw != NULL) {
+		atomic_set(&bw->bios_inflight, bios);
+		rq->cmd_flags |= REQ_COPY_USER;
+	}
+
+out:
+	return rq;
+
+out_free_bios:
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio_put(bio);
+	}
+	goto out;
+}
+
+/**
+ * blk_map_kern_sg - map kernel data to a request for REQ_TYPE_BLOCK_PC
+ * @q: request queue for which the request will be allocated
+ * @sgl: area to map
+ * @nents: number of elements in @sgl
+ * @gfp: memory allocation flags
+ * @reading: whether the data direction is a read (device to memory)
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+static struct request *blk_map_kern_sg(struct request_queue *q,
+	struct scatterlist *sgl, int nents, gfp_t gfp, bool reading)
+{
+	struct request *rq;
+
+	if (!sgl) {
+		rq = blk_get_request(q, reading ? READ : WRITE, gfp);
+		if (unlikely(!rq))
+			return ERR_PTR(-ENOMEM);
+
+		rq->cmd_type = REQ_TYPE_BLOCK_PC;
+		goto out;
+	}
+
+	rq = __blk_map_kern_sg(q, sgl, nents, NULL, gfp, reading);
+	if (unlikely(IS_ERR(rq))) {
+		struct blk_kern_sg_work *bw;
+
+		bw = blk_copy_kern_sg(q, sgl, nents, gfp, reading);
+		if (unlikely(IS_ERR(bw))) {
+			rq = ERR_PTR(PTR_ERR(bw));
+			goto out;
+		}
+
+		rq = __blk_map_kern_sg(q, bw->sg_table.sgl, bw->sg_table.nents,
+				       bw, gfp, reading);
+		if (IS_ERR(rq)) {
+			blk_free_kern_sg_work(bw);
+			goto out;
+		}
+	}
+
+out:
+	return rq;
+}
+#endif

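Taken together, blk_map_kern_sg() tries a zero-copy mapping first and falls back to blk_copy_kern_sg() only when the direct attempt fails, for example on a DMA alignment violation. A compilable toy version of that control flow, with every name invented for the example:

	/* Illustrative skeleton of the map-or-bounce fallback. */
	#include <errno.h>

	static int try_direct_map(void) { return -EINVAL; /* e.g. misaligned */ }
	static int map_via_bounce(void) { return 0; /* bounce pages are aligned */ }

	static int map_or_bounce(void)
	{
		int res = try_direct_map();

		if (res != 0)
			res = map_via_bounce();
		return res;
	}
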
/*
 * Can switch to the next dst_sg element, so, to copy to strictly only
@@ -6375,15 +6740,20 @@ out:
	return res;
}

#endif /* !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */

-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
static void scsi_end_async(struct request *req, int error)
{
	struct scsi_io_context *sioc = req->end_io_data;

	TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	lockdep_assert_held(req->q->queue_lock);
+#else
+	if (!req->q->mq_ops)
+		lockdep_assert_held(req->q->queue_lock);
+#endif

	if (sioc->done)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
@@ -6410,7 +6780,7 @@ int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
	struct request_queue *q = cmd->dev->scsi_dev->request_queue;
	struct request *rq;
	struct scsi_io_context *sioc;
-	int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
+	bool reading = !(cmd->data_direction & SCST_DATA_WRITE);
	gfp_t gfp = cmd->cmd_gfp_mask;
	int cmd_len = cmd->cdb_len;

@@ -6420,54 +6790,38 @@ int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
		goto out;
	}

-	rq = blk_get_request(q, write, gfp);
-	if (rq == NULL) {
-		res = -ENOMEM;
-		goto out_free_sioc;
-	}
-
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_QUIET;
-
-	if (cmd->sg == NULL)
-		goto done;
-
	if (cmd->data_direction == SCST_DATA_BIDI) {
		struct request *next_rq;

		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			res = -EOPNOTSUPP;
-			goto out_free_rq;
+			goto out;
		}

-		res = blk_rq_map_kern_sg(rq, cmd->out_sg, cmd->out_sg_cnt, gfp);
-		if (res != 0) {
-			TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
-			goto out_free_rq;
+		rq = blk_map_kern_sg(q, cmd->out_sg, cmd->out_sg_cnt, gfp,
+				     reading);
+		if (IS_ERR(rq)) {
+			res = PTR_ERR(rq);
+			TRACE_DBG("blk_map_kern_sg() failed: %d", res);
+			goto out;
		}

-		next_rq = blk_get_request(q, READ, gfp);
-		if (next_rq == NULL) {
-			res = -ENOMEM;
+		next_rq = blk_map_kern_sg(q, cmd->sg, cmd->sg_cnt, gfp, false);
+		if (IS_ERR(next_rq)) {
+			res = PTR_ERR(next_rq);
+			TRACE_DBG("blk_map_kern_sg() failed: %d", res);
			goto out_free_unmap;
		}
		rq->next_rq = next_rq;
+		next_rq->cmd_type = rq->cmd_type;
-
-		res = blk_rq_map_kern_sg(next_rq, cmd->sg, cmd->sg_cnt, gfp);
-		if (res != 0) {
-			TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
-			goto out_free_unmap;
-		}
	} else {
-		res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
-		if (res != 0) {
-			TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
-			goto out_free_rq;
+		rq = blk_map_kern_sg(q, cmd->sg, cmd->sg_cnt, gfp, reading);
+		if (IS_ERR(rq)) {
+			res = PTR_ERR(rq);
+			TRACE_DBG("blk_map_kern_sg() failed: %d", res);
+			goto out;
		}
	}

-done:
	TRACE_DBG("sioc %p, cmd %p", sioc, cmd);

	sioc->data = data;

@@ -6485,6 +6839,7 @@ done:
	rq->timeout = cmd->timeout;
	rq->retries = cmd->retries;
	rq->end_io_data = sioc;
+	rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq_nowait(rq->q, NULL, rq,
		(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
@@ -6492,22 +6847,23 @@ out:
	return res;

out_free_unmap:
-	if (rq->next_rq != NULL) {
-		blk_put_request(rq->next_rq);
-		rq->next_rq = NULL;
+	{
+		struct bio *bio = rq->bio, *b;
+
+		while (bio) {
+			b = bio;
+			bio = bio->bi_next;
+			b->bi_end_io(b, res);
+		}
-	blk_rq_unmap_kern_sg(rq, res);
	}
+	rq->bio = NULL;

out_free_rq:
	blk_put_request(rq);

out_free_sioc:
	kmem_cache_free(scsi_io_context_cache, sioc);
	goto out;
}
EXPORT_SYMBOL(scst_scsi_exec_async);

-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */

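A target-side caller drives scst_scsi_exec_async() with a completion callback of the signature shown above; the real consumer in this tree is scst_pass_through_cmd_done(). A hedged sketch of such a caller (the helper names and callback body are illustrative):

	/* Illustrative sketch of submitting a pass-through command. */
	static void my_pt_done(void *data, char *sense, int result, int resid)
	{
		struct scst_cmd *cmd = data;
		/* Inspect result/sense here, then resume SCST processing. */
	}

	static int submit_pt(struct scst_cmd *cmd)
	{
		int res = scst_scsi_exec_async(cmd, cmd, my_pt_done);

		if (res != 0)
			TRACE_DBG("pass-through submit failed: %d", res);
		return res;
	}
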
/**
 * scst_copy_sg() - copy data between the command's SGs

@@ -47,18 +47,13 @@ option or use a 64-bit configuration instead. See README file for \
details.
#endif

-#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
-#if !defined(CONFIG_SCST_STRICT_SERIALIZING)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) && \
+    !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
+    !defined(CONFIG_SCST_STRICT_SERIALIZING)
#warning Patch scst_exec_req_fifo-<kernel-version> was not applied on \
your kernel and CONFIG_SCST_STRICT_SERIALIZING is not defined. \
Pass-through dev handlers will not work.
-#endif /* !defined(CONFIG_SCST_STRICT_SERIALIZING) */
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
-#warning Patch scst_exec_req_fifo-<kernel-version> was not applied on \
-your kernel. Pass-through dev handlers will not work.
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
-#endif /* !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
+#endif

/**
** SCST global variables. They are all uninitialized to have their layout in

@@ -1607,26 +1602,18 @@ int __scst_register_dev_driver(struct scst_dev_type *dev_type,
	if (res != 0)
		goto out;

-#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) && \
+    !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
+    !defined(CONFIG_SCST_STRICT_SERIALIZING)
	if (dev_type->exec == NULL) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
-#if !defined(CONFIG_SCST_STRICT_SERIALIZING)
		PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
			"supported. Consider applying on your kernel patch "
			"scst_exec_req_fifo-<kernel-version> or define "
			"CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
		res = -EINVAL;
		goto out;
-#endif /* !defined(CONFIG_SCST_STRICT_SERIALIZING) */
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
-		PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
-			"supported. Consider applying on your kernel patch "
-			"scst_exec_req_fifo-<kernel-version>", dev_type->name);
-		res = -EINVAL;
-		goto out;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
	}
-#endif /* !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
+#endif

#ifdef CONFIG_SCST_PROC
	res = scst_suspend_activity(SCST_SUSPEND_TIMEOUT_USER);

@@ -412,15 +412,6 @@ static inline int scst_exec_req(struct scsi_device *sdev,
		(void *)sgl, bufflen, nents, timeout, retries, privdata, done, gfp);
#endif
}
#else /* i.e. LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
-#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
-static inline int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
-	void (*done)(void *data, char *sense, int result, int resid))
-{
-	WARN_ON_ONCE(1);
-	return -1;
-}
-#endif
#endif

int scst_alloc_space(struct scst_cmd *cmd);

@@ -1778,14 +1778,16 @@ static inline enum scst_exec_context scst_optimize_post_exec_context(
 */
void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid)
{
-	struct scst_cmd *cmd;
+	struct scst_cmd *cmd = data;

	TRACE_ENTRY();

-	cmd = (struct scst_cmd *)data;
	if (cmd == NULL)
		goto out;

+	TRACE_DBG("cmd %p; CDB[0/%d] %#x: result %d; resid %d", cmd,
+		  cmd->cdb_len, cmd->cdb[0], result, resid);
+
	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

@@ -2987,10 +2989,12 @@ static int scst_do_real_exec(struct scst_cmd *cmd)
		sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
	}

-	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
-
	scsi_dev = dev->scsi_dev;

+	TRACE_DBG("Sending cmd %p to SCSI mid-level dev %d:%d:%d:%lld", cmd,
+		  scsi_dev->host->host_no, scsi_dev->channel, scsi_dev->id,
+		  (u64)scsi_dev->lun);
+
	if (unlikely(scsi_dev == NULL)) {
		PRINT_ERROR("Command for virtual device must be "
			"processed by device handler (LUN %lld)!",