diff --git a/iscsi-scst/kernel/patches/put_page_callback-3.18.patch b/iscsi-scst/kernel/patches/put_page_callback-3.18.patch new file mode 100644 index 000000000..7f85a7496 --- /dev/null +++ b/iscsi-scst/kernel/patches/put_page_callback-3.18.patch @@ -0,0 +1,387 @@ +Subject: [PATCH] put_page_callback + +--- + drivers/block/drbd/drbd_receiver.c | 2 +- + include/linux/mm_types.h | 11 +++++++++ + include/linux/net.h | 40 ++++++++++++++++++++++++++++++ + include/linux/skbuff.h | 4 +-- + net/Kconfig | 12 +++++++++ + net/ceph/pagevec.c | 2 +- + net/core/skbuff.c | 14 +++++------ + net/core/sock.c | 4 +-- + net/ipv4/Makefile | 1 + + net/ipv4/ip_output.c | 4 +-- + net/ipv4/tcp.c | 4 +-- + net/ipv4/tcp_zero_copy.c | 50 ++++++++++++++++++++++++++++++++++++++ + net/ipv6/ip6_output.c | 2 +- + 13 files changed, 132 insertions(+), 18 deletions(-) + create mode 100644 net/ipv4/tcp_zero_copy.c + +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c +index 6960fb0..8fa4016 100644 +--- a/drivers/block/drbd/drbd_receiver.c ++++ b/drivers/block/drbd/drbd_receiver.c +@@ -132,7 +132,7 @@ static int page_chain_free(struct page *page) + struct page *tmp; + int i = 0; + page_chain_for_each_safe(page, tmp) { +- put_page(page); ++ net_put_page(page); + ++i; + } + return i; +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 6e0b286..5706a4d 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -196,6 +196,17 @@ struct page { + #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; + #endif ++ ++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) ++ /* ++ * Used to implement support for notification on zero-copy TCP transfer ++ * completion. It might look as not good to have this field here and ++ * it's better to have it in struct sk_buff, but it would make the code ++ * much more complicated and fragile, since all skb then would have to ++ * contain only pages with the same value in this field. ++ */ ++ void *net_priv; ++#endif + } + /* + * The struct page can be forced to be double word aligned so that atomic ops +diff --git a/include/linux/net.h b/include/linux/net.h +index 17d8339..f784384 100644 +--- a/include/linux/net.h ++++ b/include/linux/net.h +@@ -19,6 +19,7 @@ + #define _LINUX_NET_H + + #include ++#include + #include + #include + #include /* For O_CLOEXEC and O_NONBLOCK */ +@@ -285,6 +286,45 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, + int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); + int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); + ++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) ++/* Support for notification on zero-copy TCP transfer completion */ ++typedef void (*net_get_page_callback_t)(struct page *page); ++typedef void (*net_put_page_callback_t)(struct page *page); ++ ++extern net_get_page_callback_t net_get_page_callback; ++extern net_put_page_callback_t net_put_page_callback; ++ ++extern int net_set_get_put_page_callbacks( ++ net_get_page_callback_t get_callback, ++ net_put_page_callback_t put_callback); ++ ++/* ++ * See comment for net_set_get_put_page_callbacks() why those functions ++ * don't need any protection. 
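++ *
++ * Intended call pattern (a hedged sketch; the callback names are
++ * illustrative, not part of this patch):
++ *
++ *	res = net_set_get_put_page_callbacks(my_get_cb, my_put_cb);
++ *	... transmit pages whose page->net_priv the registering driver
++ *	has set ...
++ *	net_set_get_put_page_callbacks(NULL, NULL);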
++ */ ++static inline void net_get_page(struct page *page) ++{ ++ if (page->net_priv != 0) ++ net_get_page_callback(page); ++ get_page(page); ++} ++static inline void net_put_page(struct page *page) ++{ ++ if (page->net_priv != 0) ++ net_put_page_callback(page); ++ put_page(page); ++} ++#else ++static inline void net_get_page(struct page *page) ++{ ++ get_page(page); ++} ++static inline void net_put_page(struct page *page) ++{ ++ put_page(page); ++} ++#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */ ++ + #define MODULE_ALIAS_NETPROTO(proto) \ + MODULE_ALIAS("net-pf-" __stringify(proto)) + +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 6c8b6f6..edf6195 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -2250,7 +2250,7 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag) + */ + static inline void __skb_frag_ref(skb_frag_t *frag) + { +- get_page(skb_frag_page(frag)); ++ net_get_page(skb_frag_page(frag)); + } + + /** +@@ -2273,7 +2273,7 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) + */ + static inline void __skb_frag_unref(skb_frag_t *frag) + { +- put_page(skb_frag_page(frag)); ++ net_put_page(skb_frag_page(frag)); + } + + /** +diff --git a/net/Kconfig b/net/Kconfig +index 99815b5..ac45213 100644 +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -76,6 +76,18 @@ config INET + + Short answer: say Y. + ++config TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION ++ bool "TCP/IP zero-copy transfer completion notification" ++ depends on INET ++ default SCST_ISCSI ++ ---help--- ++ Adds support for sending a notification upon completion of a ++ zero-copy TCP/IP transfer. This can speed up certain TCP/IP ++ software. Currently this is only used by the iSCSI target driver ++ iSCSI-SCST. ++ ++ If unsure, say N. 
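++
++	  Note that this option only adds the net_get_page()/net_put_page()
++	  hooks; nothing changes at run time until a driver registers
++	  callbacks through net_set_get_put_page_callbacks().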
++ + if INET + source "net/ipv4/Kconfig" + source "net/ipv6/Kconfig" +diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c +index 5550130..993f710 100644 +--- a/net/ceph/pagevec.c ++++ b/net/ceph/pagevec.c +@@ -51,7 +51,7 @@ void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) + for (i = 0; i < num_pages; i++) { + if (dirty) + set_page_dirty_lock(pages[i]); +- put_page(pages[i]); ++ net_put_page(pages[i]); + } + if (is_vmalloc_addr(pages)) + vfree(pages); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 32e31c2..6eb3a9e 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -437,7 +437,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, + if (likely(data)) { + skb = build_skb(data, fragsz); + if (unlikely(!skb)) +- put_page(virt_to_head_page(data)); ++ net_put_page(virt_to_head_page(data)); + } + } else { + skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, +@@ -495,7 +495,7 @@ static void skb_clone_fraglist(struct sk_buff *skb) + static void skb_free_head(struct sk_buff *skb) + { + if (skb->head_frag) +- put_page(virt_to_head_page(skb->head)); ++ net_put_page(virt_to_head_page(skb->head)); + else + kfree(skb->head); + } +@@ -822,7 +822,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) + if (!page) { + while (head) { + struct page *next = (struct page *)page_private(head); +- put_page(head); ++ net_put_page(head); + head = next; + } + return -ENOMEM; +@@ -1669,7 +1669,7 @@ EXPORT_SYMBOL(skb_copy_bits); + */ + static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) + { +- put_page(spd->pages[i]); ++ net_put_page(spd->pages[i]); + } + + static struct page *linear_to_page(struct page *page, unsigned int *len, +@@ -1722,7 +1722,7 @@ static bool spd_fill_page(struct splice_pipe_desc *spd, + spd->partial[spd->nr_pages - 1].len += *len; + return false; + } +- get_page(page); ++ net_get_page(page); + spd->pages[spd->nr_pages] = page; + spd->partial[spd->nr_pages].len = *len; + spd->partial[spd->nr_pages].offset = offset; +@@ -2181,7 +2181,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) + page = virt_to_head_page(from->head); + offset = from->data - (unsigned char *)page_address(page); + __skb_fill_page_desc(to, 0, page, offset, plen); +- get_page(page); ++ net_get_page(page); + j = 1; + len -= plen; + } +@@ -2835,7 +2835,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, + copy); + frg_cnt++; + pfrag->offset += copy; +- get_page(pfrag->page); ++ net_get_page(pfrag->page); + + skb->truesize += copy; + atomic_add(copy, &sk->sk_wmem_alloc); +diff --git a/net/core/sock.c b/net/core/sock.c +index 15e0c67..e8ea0df 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1830,7 +1830,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) + } + if (pfrag->offset + sz <= pfrag->size) + return true; +- put_page(pfrag->page); ++ net_put_page(pfrag->page); + } + + pfrag->offset = 0; +@@ -2581,7 +2581,7 @@ void sk_common_release(struct sock *sk) + sk_refcnt_debug_release(sk); + + if (sk->sk_frag.page) { +- put_page(sk->sk_frag.page); ++ net_put_page(sk->sk_frag.page); + sk->sk_frag.page = NULL; + } + +diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile +index 518c04e..4072a87 100644 +--- a/net/ipv4/Makefile ++++ b/net/ipv4/Makefile +@@ -57,6 +57,7 @@ obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o + obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o + obj-$(CONFIG_NETLABEL) += cipso_ipv4.o + obj-$(CONFIG_GENEVE) += geneve.o 
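++# Callback registration stubs for zero-copy transfer completion
++# notification (used by iSCSI-SCST, see net/Kconfig).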
++obj-$(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) += tcp_zero_copy.o + + obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ + xfrm4_output.o xfrm4_protocol.o +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index bc6471d..ab9e262 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -1051,7 +1051,7 @@ alloc_new_skb: + __skb_fill_page_desc(skb, i, pfrag->page, + pfrag->offset, 0); + skb_shinfo(skb)->nr_frags = ++i; +- get_page(pfrag->page); ++ net_get_page(pfrag->page); + } + copy = min_t(int, copy, pfrag->size - pfrag->offset); + if (getfrag(from, +@@ -1276,7 +1276,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, + if (skb_can_coalesce(skb, i, page, offset)) { + skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len); + } else if (i < MAX_SKB_FRAGS) { +- get_page(page); ++ net_get_page(page); + skb_fill_page_desc(skb, i, page, offset, len); + } else { + err = -EMSGSIZE; +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 38c2bcb..f089a7a 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -949,7 +949,7 @@ new_segment: + if (can_coalesce) { + skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); + } else { +- get_page(page); ++ net_get_page(page); + skb_fill_page_desc(skb, i, page, offset, copy); + } + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; +@@ -1250,7 +1250,7 @@ new_segment: + } else { + skb_fill_page_desc(skb, i, pfrag->page, + pfrag->offset, copy); +- get_page(pfrag->page); ++ net_get_page(pfrag->page); + } + pfrag->offset += copy; + } +diff --git a/net/ipv4/tcp_zero_copy.c b/net/ipv4/tcp_zero_copy.c +new file mode 100644 +index 0000000..430147e +--- /dev/null ++++ b/net/ipv4/tcp_zero_copy.c +@@ -0,0 +1,50 @@ ++/* ++ * Support routines for TCP zero copy transmit ++ * ++ * Created by Vladislav Bolkhovitin ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++ ++net_get_page_callback_t net_get_page_callback __read_mostly; ++EXPORT_SYMBOL_GPL(net_get_page_callback); ++ ++net_put_page_callback_t net_put_page_callback __read_mostly; ++EXPORT_SYMBOL_GPL(net_put_page_callback); ++ ++/* ++ * Caller of this function must ensure that at the moment when it's called ++ * there are no pages in the system with net_priv field set to non-zero ++ * value. Hence, this function, as well as net_get_page() and net_put_page(), ++ * don't need any protection. 
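++ *
++ * In other words, the callback pointers are only changed while no page in
++ * the system can reach them through net_get_page()/net_put_page(), so the
++ * readers and the single writer never actually race.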
++ */ ++int net_set_get_put_page_callbacks( ++ net_get_page_callback_t get_callback, ++ net_put_page_callback_t put_callback) ++{ ++ int res = 0; ++ ++ if ((net_get_page_callback != NULL) && (get_callback != NULL) && ++ (net_get_page_callback != get_callback)) { ++ res = -EBUSY; ++ goto out; ++ } ++ ++ if ((net_put_page_callback != NULL) && (put_callback != NULL) && ++ (net_put_page_callback != put_callback)) { ++ res = -EBUSY; ++ goto out; ++ } ++ ++ net_get_page_callback = get_callback; ++ net_put_page_callback = put_callback; ++ ++out: ++ return res; ++} ++EXPORT_SYMBOL_GPL(net_set_get_put_page_callbacks); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 8e950c2..8cb4760 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1472,7 +1472,7 @@ alloc_new_skb: + __skb_fill_page_desc(skb, i, pfrag->page, + pfrag->offset, 0); + skb_shinfo(skb)->nr_frags = ++i; +- get_page(pfrag->page); ++ net_get_page(pfrag->page); + } + copy = min_t(int, copy, pfrag->size - pfrag->offset); + if (getfrag(from, +-- +2.1.2 + diff --git a/qla2x00t/qla2x00-target/Makefile_in-tree-3.18 b/qla2x00t/qla2x00-target/Makefile_in-tree-3.18 new file mode 100644 index 000000000..9657aee84 --- /dev/null +++ b/qla2x00t/qla2x00-target/Makefile_in-tree-3.18 @@ -0,0 +1,5 @@ +ccflags-y += -Idrivers/scsi/qla2xxx + +qla2x00tgt-y := qla2x00t.o + +obj-$(CONFIG_SCST_QLA_TGT_ADDON) += qla2x00tgt.o diff --git a/qla2x00t/qla2x00-target/qla2x00t.c b/qla2x00t/qla2x00-target/qla2x00t.c index 85e81b59e..13a3afff3 100644 --- a/qla2x00t/qla2x00-target/qla2x00t.c +++ b/qla2x00t/qla2x00-target/qla2x00t.c @@ -2679,6 +2679,31 @@ out_unlock_free_unmap: goto out; } +/* + * Convert sense buffer (byte array) to little endian format as required by + * qla24xx firmware. + */ +static void q24_copy_sense_buffer_to_ctio(ctio7_status1_entry_t *ctio, + uint8_t *sense_buf, unsigned int sense_buf_len) +{ + uint32_t *src = (void *)sense_buf; + uint32_t *end = (void *)sense_buf + sense_buf_len; + uint8_t *p; + __be32 *dst = (void *)ctio->sense_data; + + /* + * The sense buffer allocated by scst_alloc_sense() is zero-filled and + * has a length that is a multiple of four. This means that it is safe + * to access the bytes after the end of the sense buffer up to a + * boundary that is a multiple of four. 
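+	 *
+	 * Worked example (assuming the buffer itself is 32-bit aligned): an
+	 * 18-byte fixed-format sense buffer is copied as five 32-bit words;
+	 * the two bytes past its end that round it up to 20 are guaranteed
+	 * zero, which the WARN_ONCE loop below double-checks.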
+ */ + for (p = (uint8_t *)end; ((uintptr_t)p & 3) != 0; p++) + WARN_ONCE(*p != 0, "sense_buf[%zd] = %d\n", p - sense_buf, *p); + + for ( ; src < end; src++) + *dst++ = cpu_to_be32(*src); +} + static inline int q2t_need_explicit_conf(scsi_qla_host_t *ha, struct q2t_cmd *cmd, int sending_sense) { @@ -2914,7 +2939,6 @@ static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio, ctio->residual = cpu_to_le32(prm->residual); ctio->scsi_status = cpu_to_le16(prm->rq_result); if (scst_sense_valid(prm->sense_buffer)) { - int i; ctio1 = (ctio7_status1_entry_t *)ctio; if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { ctio1->flags |= cpu_to_le16( @@ -2925,20 +2949,8 @@ static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio, ctio1->flags |= cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); ctio1->scsi_status |= cpu_to_le16(SS_SENSE_LEN_VALID); ctio1->sense_length = cpu_to_le16(prm->sense_buffer_len); - for (i = 0; i < prm->sense_buffer_len/4; i++) - ((uint32_t *)ctio1->sense_data)[i] = - cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); -#if 0 - if (unlikely((prm->sense_buffer_len % 4) != 0)) { - static int q; - if (q < 10) { - PRINT_INFO("qla2x00t(%ld): %d bytes of sense " - "lost", prm->tgt->ha->instance, - prm->sense_buffer_len % 4); - q++; - } - } -#endif + q24_copy_sense_buffer_to_ctio(ctio1, prm->sense_buffer, + prm->sense_buffer_len); } else { ctio1 = (ctio7_status1_entry_t *)ctio; ctio1->flags &= ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); diff --git a/scst/kernel/in-tree/Kconfig.drivers.Linux-3.18.patch b/scst/kernel/in-tree/Kconfig.drivers.Linux-3.18.patch new file mode 100644 index 000000000..0d5a19f0f --- /dev/null +++ b/scst/kernel/in-tree/Kconfig.drivers.Linux-3.18.patch @@ -0,0 +1,13 @@ +diff --git a/drivers/Kconfig b/drivers/Kconfig +index aa43b91..c96860e 100644 +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -24,6 +24,8 @@ source "drivers/ide/Kconfig" + + source "drivers/scsi/Kconfig" + ++source "drivers/scst/Kconfig" ++ + source "drivers/ata/Kconfig" + + source "drivers/md/Kconfig" diff --git a/scst/kernel/in-tree/Makefile.dev_handlers-3.18 b/scst/kernel/in-tree/Makefile.dev_handlers-3.18 new file mode 100644 index 000000000..f933b36f7 --- /dev/null +++ b/scst/kernel/in-tree/Makefile.dev_handlers-3.18 @@ -0,0 +1,14 @@ +ccflags-y += -Wno-unused-parameter + +obj-m := scst_cdrom.o scst_changer.o scst_disk.o scst_modisk.o scst_tape.o \ + scst_vdisk.o scst_raid.o scst_processor.o scst_user.o + +obj-$(CONFIG_SCST_DISK) += scst_disk.o +obj-$(CONFIG_SCST_TAPE) += scst_tape.o +obj-$(CONFIG_SCST_CDROM) += scst_cdrom.o +obj-$(CONFIG_SCST_MODISK) += scst_modisk.o +obj-$(CONFIG_SCST_CHANGER) += scst_changer.o +obj-$(CONFIG_SCST_RAID) += scst_raid.o +obj-$(CONFIG_SCST_PROCESSOR) += scst_processor.o +obj-$(CONFIG_SCST_VDISK) += scst_vdisk.o +obj-$(CONFIG_SCST_USER) += scst_user.o diff --git a/scst/kernel/in-tree/Makefile.drivers.Linux-3.18.patch b/scst/kernel/in-tree/Makefile.drivers.Linux-3.18.patch new file mode 100644 index 000000000..4d482c340 --- /dev/null +++ b/scst/kernel/in-tree/Makefile.drivers.Linux-3.18.patch @@ -0,0 +1,12 @@ +diff --git a/drivers/Makefile b/drivers/Makefile +index ebee555..17f67ae 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -134,6 +134,7 @@ obj-$(CONFIG_SSB) += ssb/ + obj-$(CONFIG_BCMA) += bcma/ + obj-$(CONFIG_VHOST_RING) += vhost/ + obj-$(CONFIG_VLYNQ) += vlynq/ ++obj-$(CONFIG_SCST) += scst/ + obj-$(CONFIG_STAGING) += staging/ + obj-y += platform/ + #common clk code diff --git a/scst/kernel/in-tree/Makefile.scst-3.18 
b/scst/kernel/in-tree/Makefile.scst-3.18 new file mode 100644 index 000000000..53af5f388 --- /dev/null +++ b/scst/kernel/in-tree/Makefile.scst-3.18 @@ -0,0 +1,13 @@ +ccflags-y += -Wno-unused-parameter + +scst-y += scst_main.o +scst-y += scst_pres.o +scst-y += scst_targ.o +scst-y += scst_lib.o +scst-y += scst_sysfs.o +scst-y += scst_mem.o +scst-y += scst_tg.o +scst-y += scst_debug.o + +obj-$(CONFIG_SCST) += scst.o dev_handlers/ fcst/ iscsi-scst/ qla2xxx-target/ \ + srpt/ scst_local/ diff --git a/scst/kernel/scst_exec_req_fifo-3.18.patch b/scst/kernel/scst_exec_req_fifo-3.18.patch new file mode 100644 index 000000000..e64a14fde --- /dev/null +++ b/scst/kernel/scst_exec_req_fifo-3.18.patch @@ -0,0 +1,536 @@ +Subject: [PATCH] scst_exec_req_fifo + +--- + block/blk-map.c | 329 ++++++++++++++++++++++++++++++++++++++++++++ + include/linux/blkdev.h | 5 + + include/linux/scatterlist.h | 4 + + lib/scatterlist.c | 124 +++++++++++++++++ + 4 files changed, 462 insertions(+) + +diff --git a/block/blk-map.c b/block/blk-map.c +index f890d43..d4b8509 100644 +--- a/block/blk-map.c ++++ b/block/blk-map.c +@@ -5,6 +5,8 @@ + #include + #include + #include ++#include ++#include + #include /* for struct sg_iovec */ + + #include "blk.h" +@@ -273,6 +275,333 @@ int blk_rq_unmap_user(struct bio *bio) + } + EXPORT_SYMBOL(blk_rq_unmap_user); + ++struct blk_kern_sg_work { ++ atomic_t bios_inflight; ++ struct sg_table sg_table; ++ struct scatterlist *src_sgl; ++}; ++ ++static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw) ++{ ++ struct sg_table *sgt = &bw->sg_table; ++ struct scatterlist *sg; ++ int i; ++ ++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { ++ struct page *pg = sg_page(sg); ++ if (pg == NULL) ++ break; ++ __free_page(pg); ++ } ++ ++ sg_free_table(sgt); ++ kfree(bw); ++ return; ++} ++ ++static void blk_bio_map_kern_endio(struct bio *bio, int err) ++{ ++ struct blk_kern_sg_work *bw = bio->bi_private; ++ ++ if (bw != NULL) { ++ /* Decrement the bios in processing and, if zero, free */ ++ BUG_ON(atomic_read(&bw->bios_inflight) <= 0); ++ if (atomic_dec_and_test(&bw->bios_inflight)) { ++ if ((bio_data_dir(bio) == READ) && (err == 0)) { ++ unsigned long flags; ++ ++ local_irq_save(flags); /* to protect KMs */ ++ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0); ++ local_irq_restore(flags); ++ } ++ blk_free_kern_sg_work(bw); ++ } ++ } ++ ++ bio_put(bio); ++ return; ++} ++ ++static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl, ++ int nents, struct blk_kern_sg_work **pbw, ++ gfp_t gfp, gfp_t page_gfp) ++{ ++ int res = 0, i; ++ struct scatterlist *sg; ++ struct scatterlist *new_sgl; ++ int new_sgl_nents; ++ size_t len = 0, to_copy; ++ struct blk_kern_sg_work *bw; ++ ++ bw = kzalloc(sizeof(*bw), gfp); ++ if (bw == NULL) ++ goto out; ++ ++ bw->src_sgl = sgl; ++ ++ for_each_sg(sgl, sg, nents, i) ++ len += sg->length; ++ to_copy = len; ++ ++ new_sgl_nents = PFN_UP(len); ++ ++ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp); ++ if (res != 0) ++ goto err_free; ++ ++ new_sgl = bw->sg_table.sgl; ++ ++ for_each_sg(new_sgl, sg, new_sgl_nents, i) { ++ struct page *pg; ++ ++ pg = alloc_page(page_gfp); ++ if (pg == NULL) ++ goto err_free; ++ ++ sg_assign_page(sg, pg); ++ sg->length = min_t(size_t, PAGE_SIZE, len); ++ ++ len -= PAGE_SIZE; ++ } ++ ++ if (rq_data_dir(rq) == WRITE) { ++ /* ++ * We need to limit amount of copied data to to_copy, because ++ * sgl might have the last element in sgl not marked as last in ++ * SG chaining. 
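++		 *
++		 * Passing to_copy as the copy_len argument below makes
++		 * sg_copy() stop after exactly the payload size instead of
++		 * running until sg_next() returns NULL.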
++ */ ++ sg_copy(new_sgl, sgl, 0, to_copy); ++ } ++ ++ *pbw = bw; ++ /* ++ * REQ_COPY_USER name is misleading. It should be something like ++ * REQ_HAS_TAIL_SPACE_FOR_PADDING. ++ */ ++ rq->cmd_flags |= REQ_COPY_USER; ++ ++out: ++ return res; ++ ++err_free: ++ blk_free_kern_sg_work(bw); ++ res = -ENOMEM; ++ goto out; ++} ++ ++static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl, ++ int nents, struct blk_kern_sg_work *bw, gfp_t gfp) ++{ ++ int res; ++ struct request_queue *q = rq->q; ++ int rw = rq_data_dir(rq); ++ int max_nr_vecs, i; ++ size_t tot_len; ++ bool need_new_bio; ++ struct scatterlist *sg, *prev_sg = NULL; ++ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; ++ int bios; ++ ++ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) { ++ WARN_ON(1); ++ res = -EINVAL; ++ goto out; ++ } ++ ++ /* ++ * Let's keep each bio allocation inside a single page to decrease ++ * probability of failure. ++ */ ++ max_nr_vecs = min_t(size_t, ++ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)), ++ BIO_MAX_PAGES); ++ ++ need_new_bio = true; ++ tot_len = 0; ++ bios = 0; ++ for_each_sg(sgl, sg, nents, i) { ++ struct page *page = sg_page(sg); ++ void *page_addr = page_address(page); ++ size_t len = sg->length, l; ++ size_t offset = sg->offset; ++ ++ tot_len += len; ++ prev_sg = sg; ++ ++ /* ++ * Each segment must be aligned on DMA boundary and ++ * not on stack. The last one may have unaligned ++ * length as long as the total length is aligned to ++ * DMA padding alignment. ++ */ ++ if (i == nents - 1) ++ l = 0; ++ else ++ l = len; ++ if (((sg->offset | l) & queue_dma_alignment(q)) || ++ (page_addr && object_is_on_stack(page_addr + sg->offset))) { ++ res = -EINVAL; ++ goto out_free_bios; ++ } ++ ++ while (len > 0) { ++ size_t bytes; ++ int rc; ++ ++ if (need_new_bio) { ++ bio = bio_kmalloc(gfp, max_nr_vecs); ++ if (bio == NULL) { ++ res = -ENOMEM; ++ goto out_free_bios; ++ } ++ ++ if (rw == WRITE) ++ bio->bi_rw |= REQ_WRITE; ++ ++ bios++; ++ bio->bi_private = bw; ++ bio->bi_end_io = blk_bio_map_kern_endio; ++ ++ if (hbio == NULL) ++ hbio = tbio = bio; ++ else ++ tbio = tbio->bi_next = bio; ++ } ++ ++ bytes = min_t(size_t, len, PAGE_SIZE - offset); ++ ++ rc = bio_add_pc_page(q, bio, page, bytes, offset); ++ if (rc < bytes) { ++ if (unlikely(need_new_bio || (rc < 0))) { ++ if (rc < 0) ++ res = rc; ++ else ++ res = -EIO; ++ goto out_free_bios; ++ } else { ++ need_new_bio = true; ++ len -= rc; ++ offset += rc; ++ continue; ++ } ++ } ++ ++ need_new_bio = false; ++ offset = 0; ++ len -= bytes; ++ page = nth_page(page, 1); ++ } ++ } ++ ++ if (hbio == NULL) { ++ res = -EINVAL; ++ goto out_free_bios; ++ } ++ ++ /* Total length must be aligned on DMA padding alignment */ ++ if ((tot_len & q->dma_pad_mask) && ++ !(rq->cmd_flags & REQ_COPY_USER)) { ++ res = -EINVAL; ++ goto out_free_bios; ++ } ++ ++ if (bw != NULL) ++ atomic_set(&bw->bios_inflight, bios); ++ ++ while (hbio != NULL) { ++ bio = hbio; ++ hbio = hbio->bi_next; ++ bio->bi_next = NULL; ++ ++ blk_queue_bounce(q, &bio); ++ ++ res = blk_rq_append_bio(q, rq, bio); ++ if (unlikely(res != 0)) { ++ bio->bi_next = hbio; ++ hbio = bio; ++ /* We can have one or more bios bounced */ ++ goto out_unmap_bios; ++ } ++ } ++ ++ res = 0; ++out: ++ return res; ++ ++out_unmap_bios: ++ blk_rq_unmap_kern_sg(rq, res); ++ ++out_free_bios: ++ while (hbio != NULL) { ++ bio = hbio; ++ hbio = hbio->bi_next; ++ bio_put(bio); ++ } ++ goto out; ++} ++ ++/** ++ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC ++ 
* @rq: request to fill ++ * @sgl: area to map ++ * @nents: number of elements in @sgl ++ * @gfp: memory allocation flags ++ * ++ * Description: ++ * Data will be mapped directly if possible. Otherwise a bounce ++ * buffer will be used. ++ */ ++int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl, ++ int nents, gfp_t gfp) ++{ ++ int res; ++ ++ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp); ++ if (unlikely(res != 0)) { ++ struct blk_kern_sg_work *bw = NULL; ++ ++ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw, ++ gfp, rq->q->bounce_gfp | gfp); ++ if (unlikely(res != 0)) ++ goto out; ++ ++ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl, ++ bw->sg_table.nents, bw, gfp); ++ if (res != 0) { ++ blk_free_kern_sg_work(bw); ++ goto out; ++ } ++ } ++ ++out: ++ return res; ++} ++EXPORT_SYMBOL(blk_rq_map_kern_sg); ++ ++/** ++ * blk_rq_unmap_kern_sg - unmap a request with kernel sg ++ * @rq: request to unmap ++ * @err: non-zero error code ++ * ++ * Description: ++ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called ++ * only in case of an error! ++ */ ++void blk_rq_unmap_kern_sg(struct request *rq, int err) ++{ ++ struct bio *bio = rq->bio; ++ ++ while (bio) { ++ struct bio *b = bio; ++ bio = bio->bi_next; ++ b->bi_end_io(b, err); ++ } ++ rq->bio = NULL; ++ ++ return; ++} ++EXPORT_SYMBOL(blk_rq_unmap_kern_sg); ++ + /** + * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage + * @q: request queue where request should be inserted +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index aac0f9e..5cd3afa 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -731,6 +731,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; + #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) + #define BLK_MIN_SG_TIMEOUT (7 * HZ) + ++#define SCSI_EXEC_REQ_FIFO_DEFINED ++ + #ifdef CONFIG_BOUNCE + extern int init_emergency_isa_pool(void); + extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); +@@ -852,6 +854,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns + extern int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, const struct sg_iovec *, + int, unsigned int, gfp_t); ++extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl, ++ int nents, gfp_t gfp); ++extern void blk_rq_unmap_kern_sg(struct request *rq, int err); + extern int blk_execute_rq(struct request_queue *, struct gendisk *, + struct request *, int); + extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, +diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h +index ed8f9e7..f64e02f 100644 +--- a/include/linux/scatterlist.h ++++ b/include/linux/scatterlist.h +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + struct sg_table { + struct scatterlist *sgl; /* the list */ +@@ -249,6 +250,9 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, + size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip); + ++int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg, ++ int nents_to_copy, size_t copy_len); ++ + /* + * Maximum number of entries that will be allocated in one piece, if + * a list larger than this is required then chaining will be utilized. 
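+
+(Hedged usage sketch, not part of the patch: a caller builds a request,
+maps a kernel SG list with blk_rq_map_kern_sg(rq, sgl, nents, gfp) before
+queueing it, and calls blk_rq_unmap_kern_sg(rq, err) only when the request
+must be torn down with an error instead of completing normally.)
+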
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c +index c9f2e8c..ba693d1 100644 +--- a/lib/scatterlist.c ++++ b/lib/scatterlist.c +@@ -727,3 +727,127 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); + } + EXPORT_SYMBOL(sg_pcopy_to_buffer); ++ ++ ++/* ++ * Can switch to the next dst_sg element, so, to copy to strictly only ++ * one dst_sg element, it must be either last in the chain, or ++ * copy_len == dst_sg->length. ++ */ ++static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len, ++ size_t *pdst_offs, struct scatterlist *src_sg, ++ size_t copy_len) ++{ ++ int res = 0; ++ struct scatterlist *dst_sg; ++ size_t src_len, dst_len, src_offs, dst_offs; ++ struct page *src_page, *dst_page; ++ ++ dst_sg = *pdst_sg; ++ dst_len = *pdst_len; ++ dst_offs = *pdst_offs; ++ dst_page = sg_page(dst_sg); ++ ++ src_page = sg_page(src_sg); ++ src_len = src_sg->length; ++ src_offs = src_sg->offset; ++ ++ do { ++ void *saddr, *daddr; ++ size_t n; ++ ++ saddr = kmap_atomic(src_page + (src_offs >> PAGE_SHIFT)) + ++ (src_offs & ~PAGE_MASK); ++ daddr = kmap_atomic(dst_page + (dst_offs >> PAGE_SHIFT)) + ++ (dst_offs & ~PAGE_MASK); ++ ++ if (((src_offs & ~PAGE_MASK) == 0) && ++ ((dst_offs & ~PAGE_MASK) == 0) && ++ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) && ++ (copy_len >= PAGE_SIZE)) { ++ copy_page(daddr, saddr); ++ n = PAGE_SIZE; ++ } else { ++ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK), ++ PAGE_SIZE - (src_offs & ~PAGE_MASK)); ++ n = min(n, src_len); ++ n = min(n, dst_len); ++ n = min_t(size_t, n, copy_len); ++ memcpy(daddr, saddr, n); ++ } ++ dst_offs += n; ++ src_offs += n; ++ ++ kunmap_atomic(saddr); ++ kunmap_atomic(daddr); ++ ++ res += n; ++ copy_len -= n; ++ if (copy_len == 0) ++ goto out; ++ ++ src_len -= n; ++ dst_len -= n; ++ if (dst_len == 0) { ++ dst_sg = sg_next(dst_sg); ++ if (dst_sg == NULL) ++ goto out; ++ dst_page = sg_page(dst_sg); ++ dst_len = dst_sg->length; ++ dst_offs = dst_sg->offset; ++ } ++ } while (src_len > 0); ++ ++out: ++ *pdst_sg = dst_sg; ++ *pdst_len = dst_len; ++ *pdst_offs = dst_offs; ++ return res; ++} ++ ++/** ++ * sg_copy - copy one SG vector to another ++ * @dst_sg: destination SG ++ * @src_sg: source SG ++ * @nents_to_copy: maximum number of entries to copy ++ * @copy_len: maximum amount of data to copy. If 0, then copy all. ++ * ++ * Description: ++ * Data from the source SG vector will be copied to the destination SG ++ * vector. End of the vectors will be determined by sg_next() returning ++ * NULL. Returns number of bytes copied. 
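++ *
++ *	For example, sg_copy(dst_sg, src_sg, 0, 0) copies the whole vector:
++ *	a zero @nents_to_copy or @copy_len is treated as "no limit".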
++ */ ++int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg, ++ int nents_to_copy, size_t copy_len) ++{ ++ int res = 0; ++ size_t dst_len, dst_offs; ++ ++ if (copy_len == 0) ++ copy_len = 0x7FFFFFFF; /* copy all */ ++ ++ if (nents_to_copy == 0) ++ nents_to_copy = 0x7FFFFFFF; /* copy all */ ++ ++ dst_len = dst_sg->length; ++ dst_offs = dst_sg->offset; ++ ++ do { ++ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs, ++ src_sg, copy_len); ++ copy_len -= copied; ++ res += copied; ++ if ((copy_len == 0) || (dst_sg == NULL)) ++ goto out; ++ ++ nents_to_copy--; ++ if (nents_to_copy == 0) ++ goto out; ++ ++ src_sg = sg_next(src_sg); ++ } while (src_sg != NULL); ++ ++out: ++ return res; ++} ++EXPORT_SYMBOL(sg_copy); +-- +2.1.2 + diff --git a/scst/src/scst_lib.c b/scst/src/scst_lib.c index 62cd4197c..e63347bdc 100644 --- a/scst/src/scst_lib.c +++ b/scst/src/scst_lib.c @@ -7349,7 +7349,8 @@ int scst_calc_block_shift(int sector_size) sector_size = 512; block_shift = ilog2(sector_size); - WARN_ON(1 << block_shift != sector_size); + WARN_ONCE(1 << block_shift != sector_size, "1 << %d != %d\n", + block_shift, sector_size); if (block_shift < 9) { PRINT_ERROR("Wrong sector size %d", sector_size); diff --git a/scst/src/scst_sysfs.c b/scst/src/scst_sysfs.c index 16072fc86..b6105643a 100644 --- a/scst/src/scst_sysfs.c +++ b/scst/src/scst_sysfs.c @@ -1268,6 +1268,7 @@ static int __scst_process_luns_mgmt_store(char *buffer, goto out_unlock; } else if (virt_lun > SCST_MAX_LUN) { PRINT_ERROR("Too big LUN %ld (max %d)", virt_lun, SCST_MAX_LUN); + res = -EINVAL; goto out_unlock; } diff --git a/scst_local/in-tree/Makefile-3.18 b/scst_local/in-tree/Makefile-3.18 new file mode 100644 index 000000000..8cbbbff63 --- /dev/null +++ b/scst_local/in-tree/Makefile-3.18 @@ -0,0 +1,2 @@ +obj-$(CONFIG_SCST_LOCAL) += scst_local.o + diff --git a/scst_local/scst_local.c b/scst_local/scst_local.c index 88c684075..f39472add 100644 --- a/scst_local/scst_local.c +++ b/scst_local/scst_local.c @@ -229,7 +229,7 @@ static int scst_local_get_sas_transport_id(struct scst_local_sess *sess, tr_id[5] = 0xEE; tr_id[6] = 0xDE; tr_id[7] = 0x40 | ((sess->number >> 4) & 0x0F); - tr_id[8] = 0x0F | (sess->number & 0xF0); + tr_id[8] = 0x0F | ((sess->number & 0x0F) << 4); tr_id[9] = 0xAD; tr_id[10] = 0xE0; tr_id[11] = 0x50; diff --git a/srpt/patches/kernel-3.18-pre-cflags.patch b/srpt/patches/kernel-3.18-pre-cflags.patch new file mode 100644 index 000000000..a6adaf47b --- /dev/null +++ b/srpt/patches/kernel-3.18-pre-cflags.patch @@ -0,0 +1,12 @@ +diff --git a/Makefile b/Makefile +index fd80c6e..09ca4ea 100644 +--- a/Makefile ++++ b/Makefile +@@ -390,6 +390,7 @@ USERINCLUDE := \ + # Use LINUXINCLUDE when you must reference the include/ directory. + # Needed to be compatible with the O= option + LINUXINCLUDE := \ ++ $(PRE_CFLAGS) \ + -I$(srctree)/arch/$(hdr-arch)/include \ + -Iarch/$(hdr-arch)/include/generated \ + $(if $(KBUILD_SRC), -I$(srctree)/include) \
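
Usage sketch for the PRE_CFLAGS hook added above (hedged; the paths are
illustrative only, not taken from the SCST build scripts):

    make -C /lib/modules/3.18.0/build M=$PWD \
         PRE_CFLAGS="-I$PWD/../../scst/include" modules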