diff --git a/qla2x00t-32gbit/qla_fw.h b/qla2x00t-32gbit/qla_fw.h
index 50c1e6c62..b61a09c2c 100644
--- a/qla2x00t-32gbit/qla_fw.h
+++ b/qla2x00t-32gbit/qla_fw.h
@@ -7,8 +7,17 @@
 #ifndef __QLA_FW_H
 #define __QLA_FW_H
 
+#include <linux/version.h>
 #include <linux/nvme.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 #include <linux/nvme-fc-driver.h>
+#endif
+
+#ifdef INSIDE_KERNEL_TREE
+#include <scst/backport.h>
+#else
+#include "backport.h"
+#endif
 
 #define MBS_CHECKSUM_ERROR	0x4010
 #define MBS_INVALID_PRODUCT_KEY	0x4020
diff --git a/qla2x00t-32gbit/qla_isr.c b/qla2x00t-32gbit/qla_isr.c
index 3a416068e..bf6de056f 100644
--- a/qla2x00t-32gbit/qla_isr.c
+++ b/qla2x00t-32gbit/qla_isr.c
@@ -25,7 +25,9 @@
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/fc/fc_fs.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 #include <linux/nvme-fc-driver.h>
+#endif
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
@@ -3443,6 +3445,13 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	struct qla_msix_entry *qentry;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	int min_vecs = QLA_BASE_VECTORS;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
+	    IS_ATIO_MSIX_CAPABLE(ha)) {
+		min_vecs++;
+	}
+#else
 	struct irq_affinity desc = {
 		.pre_vectors = QLA_BASE_VECTORS,
 	};
@@ -3452,7 +3461,12 @@
 		desc.pre_vectors++;
 		min_vecs++;
 	}
+#endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, ha->msix_count,
+				    PCI_IRQ_MSIX);
+#else
 	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
 		/* user wants to control IRQ setting for target mode */
 		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
@@ -3461,6 +3475,7 @@
 		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
 		    ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
 		    &desc);
+#endif
 
 	if (ret < 0) {
 		ql_log(ql_log_fatal, vha, 0x00c7,
@@ -3489,7 +3504,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 		    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
 		}
 	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 	vha->irq_offset = desc.pre_vectors;
+#else
+	vha->irq_offset = min_vecs;
+#endif
 
 	ha->msix_entries = kcalloc(ha->msix_count, sizeof(struct qla_msix_entry),
 				GFP_KERNEL);
diff --git a/qla2x00t-32gbit/qla_nvme.c b/qla2x00t-32gbit/qla_nvme.c
index 26cf748f7..56e0d777f 100644
--- a/qla2x00t-32gbit/qla_nvme.c
+++ b/qla2x00t-32gbit/qla_nvme.c
@@ -5,6 +5,10 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+
 #include <linux/scatterlist.h>
 #include <linux/delay.h>
 #include <linux/nvme.h>
@@ -717,3 +721,5 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
 
 	return ret;
 }
+
+#endif
diff --git a/qla2x00t-32gbit/qla_nvme.h b/qla2x00t-32gbit/qla_nvme.h
index da8dad5ad..922276dc5 100644
--- a/qla2x00t-32gbit/qla_nvme.h
+++ b/qla2x00t-32gbit/qla_nvme.h
@@ -10,7 +10,9 @@
 #include <linux/blk-mq.h>
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/nvme.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 #include <linux/nvme-fc-driver.h>
+#endif
 
 #include "qla_def.h"
 
@@ -139,6 +141,7 @@ struct pt_ls4_rx_unsol {
 	uint32_t payload[3];
 };
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 /*
 * Global functions prototype in qla_nvme.c source file.
 */
@@ -146,7 +149,30 @@
 int qla_nvme_register_hba(struct scsi_qla_host *);
 int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
 void qla_nvme_delete(struct scsi_qla_host *);
 void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
+#else
+static inline int qla_nvme_register_hba(struct scsi_qla_host *vha)
+{
+	return -ENOTSUPP;
+}
+
+static inline int qla_nvme_register_remote(struct scsi_qla_host *vha,
+					   struct fc_port *fcport)
+{
+	return -ENOTSUPP;
+}
+
+static inline void qla_nvme_delete(struct scsi_qla_host *vha)
+{
+}
+
+static inline void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp,
+				  int res)
+{
+}
+#endif
+
 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *, struct req_que *);
 void qla24xx_async_gffid_sp_done(void *, int);
+
 #endif