diff --git a/scst/include/scst.h b/scst/include/scst.h
index 97333a4bf..7a43769db 100644
--- a/scst/include/scst.h
+++ b/scst/include/scst.h
@@ -3148,11 +3148,7 @@ struct scst_tgt_dev {
 	gfp_t tgt_dev_gfp_mask;
 
 	/* SGV pool from which buffers of this tgt_dev's cmds should be allocated */
-#ifdef CONFIG_CPUMASK_OFFSTACK
 	struct sgv_pool **pools;
-#else
-	struct sgv_pool *pools[NR_CPUS];
-#endif
 
 	/* Max number of allowed in this tgt_dev SG segments */
 	int max_sg_cnt;
diff --git a/scst/src/scst_lib.c b/scst/src/scst_lib.c
index 501bf9deb..284195d72 100644
--- a/scst/src/scst_lib.c
+++ b/scst/src/scst_lib.c
@@ -5276,16 +5276,6 @@ static int scst_alloc_add_tgt_dev(struct scst_session *sess,
 	else
 		clear_bit(SCST_TGT_DEV_BLACK_HOLE, &tgt_dev->tgt_dev_flags);
 
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	tgt_dev->pools = kzalloc_node(sizeof(tgt_dev->pools[0])*NR_CPUS,
-			GFP_KERNEL, dev->dev_numa_node_id);
-	if (tgt_dev->pools == NULL) {
-		PRINT_ERROR("Unable to alloc tgt_dev->pools (size %zd)",
-			sizeof(tgt_dev->pools[0])*NR_CPUS);
-		goto out_free;
-	}
-#endif
-
 	scst_sgv_pool_use_norm(tgt_dev);
 
 	if (dev->scsi_dev != NULL) {
@@ -5406,11 +5396,7 @@ out_dec_free:
 
 out_free_ua:
 	scst_free_all_UA(tgt_dev);
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	kfree(tgt_dev->pools);
-out_free:
-#endif
 	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
 	goto out;
 }
 
@@ -5468,10 +5454,6 @@ static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
 
 	scst_tgt_dev_stop_threads(tgt_dev);
 
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	kfree(tgt_dev->pools);
-#endif
-
 	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
 
 	TRACE_EXIT();
diff --git a/scst/src/scst_mem.c b/scst/src/scst_mem.c
index 6b3a2d125..f6f2b924a 100644
--- a/scst/src/scst_mem.c
+++ b/scst/src/scst_mem.c
@@ -46,6 +46,12 @@ static struct sgv_pool *sgv_dma_pool_per_cpu[NR_CPUS];
 static struct sgv_pool *sgv_norm_clust_pool_per_cpu[NR_CPUS];
 static struct sgv_pool *sgv_norm_pool_per_cpu[NR_CPUS];
 
+static struct sgv_pool *sgv_dma_pool_global[NR_CPUS];
+static struct sgv_pool *sgv_norm_clust_pool_global[NR_CPUS];
+static struct sgv_pool *sgv_norm_pool_global[NR_CPUS];
+
+static struct sgv_pool *sgv_norm_clust_pool_main, *sgv_norm_pool_main, *sgv_dma_pool_main;
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
 #if defined(CONFIG_LOCKDEP) && !defined(CONFIG_SCST_PROC)
 static struct lock_class_key scst_pool_key;
@@ -54,8 +60,6 @@ static struct lockdep_map scst_pool_dep_map =
 #endif
 #endif
 
-static struct sgv_pool *sgv_norm_clust_pool, *sgv_norm_pool, *sgv_dma_pool;
-
 #ifndef CONFIG_SCST_NO_TOTAL_MEM_CHECKS
 static atomic_t sgv_pages_total = ATOMIC_INIT(0);
 #endif
@@ -103,39 +107,33 @@ static inline bool sgv_pool_clustered(const struct sgv_pool *pool)
 
 void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
 {
-	int i;
 	tgt_dev->tgt_dev_gfp_mask = __GFP_NOWARN;
-	for (i = 0; i < NR_CPUS; i++)
-		if (!scst_force_global_sgv_pool)
-			tgt_dev->pools[i] = sgv_norm_pool_per_cpu[i];
-		else
-			tgt_dev->pools[i] = sgv_norm_pool;
+	if (!scst_force_global_sgv_pool)
+		tgt_dev->pools = sgv_norm_pool_per_cpu;
+	else
+		tgt_dev->pools = sgv_norm_pool_global;
 	tgt_dev->tgt_dev_clust_pool = 0;
 }
 
 void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
 {
-	int i;
 	TRACE_MEM("%s", "Use clustering");
 	tgt_dev->tgt_dev_gfp_mask = __GFP_NOWARN;
-	for (i = 0; i < NR_CPUS; i++)
-		if (!scst_force_global_sgv_pool)
-			tgt_dev->pools[i] = sgv_norm_clust_pool_per_cpu[i];
-		else
-			tgt_dev->pools[i] = sgv_norm_clust_pool;
+	if (!scst_force_global_sgv_pool)
+		tgt_dev->pools = sgv_norm_clust_pool_per_cpu;
+	else
+		tgt_dev->pools = sgv_norm_clust_pool_global;
 	tgt_dev->tgt_dev_clust_pool = 1;
 }
 
 void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
 {
-	int i;
 	TRACE_MEM("%s", "Use ISA DMA memory");
 	tgt_dev->tgt_dev_gfp_mask = __GFP_NOWARN | GFP_DMA;
-	for (i = 0; i < NR_CPUS; i++)
-		if (!scst_force_global_sgv_pool)
-			tgt_dev->pools[i] = sgv_dma_pool_per_cpu[i];
-		else
-			tgt_dev->pools[i] = sgv_dma_pool;
+	if (!scst_force_global_sgv_pool)
+		tgt_dev->pools = sgv_dma_pool_per_cpu;
+	else
+		tgt_dev->pools = sgv_dma_pool_global;
 	tgt_dev->tgt_dev_clust_pool = 0;
 }
 
@@ -1760,18 +1758,18 @@ int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
 
 	sgv_evaluate_local_max_pages();
 
-	sgv_norm_pool = sgv_pool_create("sgv", sgv_no_clustering, 0, false, 0);
-	if (sgv_norm_pool == NULL)
+	sgv_norm_pool_main = sgv_pool_create("sgv", sgv_no_clustering, 0, false, 0);
+	if (sgv_norm_pool_main == NULL)
 		goto out_free_pool;
 
-	sgv_norm_clust_pool = sgv_pool_create("sgv-clust",
+	sgv_norm_clust_pool_main = sgv_pool_create("sgv-clust",
 		sgv_full_clustering, 0, false, 0);
-	if (sgv_norm_clust_pool == NULL)
+	if (sgv_norm_clust_pool_main == NULL)
 		goto out_free_norm;
 
-	sgv_dma_pool = sgv_pool_create("sgv-dma", sgv_no_clustering, 0,
+	sgv_dma_pool_main = sgv_pool_create("sgv-dma", sgv_no_clustering, 0,
 		false, 0);
-	if (sgv_dma_pool == NULL)
+	if (sgv_dma_pool_main == NULL)
 		goto out_free_clust;
 
 	/*
@@ -1779,6 +1777,15 @@ int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
 	 * callbacks must be installed!
 	 */
 
+	for (i = 0; i < NR_CPUS; i++)
+		sgv_norm_pool_global[i] = sgv_norm_pool_main;
+
+	for (i = 0; i < NR_CPUS; i++)
+		sgv_norm_clust_pool_global[i] = sgv_norm_clust_pool_main;
+
+	for (i = 0; i < NR_CPUS; i++)
+		sgv_dma_pool_global[i] = sgv_dma_pool_main;
+
 	for (i = 0; i < NR_CPUS; i++) {
 		char name[60];
 		if (!cpu_online(i))
@@ -1844,13 +1851,13 @@ out_free_per_cpu_norm:
 		if (sgv_norm_pool_per_cpu[i] != NULL)
 			sgv_pool_destroy(sgv_norm_pool_per_cpu[i]);
 
-	sgv_pool_destroy(sgv_dma_pool);
+	sgv_pool_destroy(sgv_dma_pool_main);
 
 out_free_clust:
-	sgv_pool_destroy(sgv_norm_clust_pool);
+	sgv_pool_destroy(sgv_norm_clust_pool_main);
 
 out_free_norm:
-	sgv_pool_destroy(sgv_norm_pool);
+	sgv_pool_destroy(sgv_norm_pool_main);
 
 out_free_pool:
 	kmem_cache_destroy(sgv_pool_cachep);
@@ -1872,21 +1879,30 @@ void scst_sgv_pools_deinit(void)
 	unregister_shrinker(&sgv_shrinker);
 #endif
 
-	sgv_pool_destroy(sgv_dma_pool);
+	sgv_pool_destroy(sgv_dma_pool_main);
 	for (i = 0; i < NR_CPUS; i++)
 		if (sgv_dma_pool_per_cpu[i] != NULL)
 			sgv_pool_destroy(sgv_dma_pool_per_cpu[i]);
 
-	sgv_pool_destroy(sgv_norm_pool);
+	sgv_pool_destroy(sgv_norm_pool_main);
 	for (i = 0; i < NR_CPUS; i++)
 		if (sgv_norm_pool_per_cpu[i] != NULL)
 			sgv_pool_destroy(sgv_norm_pool_per_cpu[i]);
 
-	sgv_pool_destroy(sgv_norm_clust_pool);
+	sgv_pool_destroy(sgv_norm_clust_pool_main);
 	for (i = 0; i < NR_CPUS; i++)
 		if (sgv_norm_clust_pool_per_cpu[i] != NULL)
 			sgv_pool_destroy(sgv_norm_clust_pool_per_cpu[i]);
 
+	for (i = 0; i < NR_CPUS; i++)
+		sgv_norm_pool_global[i] = NULL;
+
+	for (i = 0; i < NR_CPUS; i++)
+		sgv_norm_clust_pool_global[i] = NULL;
+
+	for (i = 0; i < NR_CPUS; i++)
+		sgv_dma_pool_global[i] = NULL;
+
 	kmem_cache_destroy(sgv_pool_cachep);
 
 	TRACE_EXIT();
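
Note on the scheme this patch sets up, with a small standalone sketch. The *_global[NR_CPUS] arrays exist so that tgt_dev->pools can always be a plain struct sgv_pool **: in per-CPU mode it points at the matching *_per_cpu array, and in forced-global mode it points at an array whose every slot refers to the same *_main pool. Consumers can then index pools[cpu] unconditionally, with no global-vs-per-CPU branch on the lookup path and no per-tgt_dev NR_CPUS-sized allocation (which is what the deleted CONFIG_CPUMASK_OFFSTACK code provided). The sketch below only illustrates the idea; pick_pool(), main() and the two-pool setup are hypothetical stand-ins, not SCST code.

/* Minimal sketch of the pointer-to-shared-array scheme (hypothetical names). */
#include <stdio.h>

#define NR_CPUS 4

struct sgv_pool { const char *name; };

static struct sgv_pool pool_main = { "sgv" };		/* one shared pool */
static struct sgv_pool pool_cpu[NR_CPUS];		/* distinct per-CPU pools */

static struct sgv_pool *pool_per_cpu[NR_CPUS];		/* slot i -> &pool_cpu[i] */
static struct sgv_pool *pool_global[NR_CPUS];		/* every slot -> &pool_main */

static void pools_init(void)
{
	for (int i = 0; i < NR_CPUS; i++) {
		pool_cpu[i].name = "sgv-cpu";
		pool_per_cpu[i] = &pool_cpu[i];
		pool_global[i] = &pool_main;
	}
}

/*
 * Consumers hold a single 'struct sgv_pool **pools' and index it by CPU;
 * because global mode is pre-expanded into an NR_CPUS-wide array, this
 * lookup needs no mode check on the hot path.
 */
static struct sgv_pool *pick_pool(struct sgv_pool **pools, int cpu)
{
	return pools[cpu];
}

int main(void)
{
	pools_init();
	int force_global = 1;	/* stand-in for scst_force_global_sgv_pool */
	struct sgv_pool **pools = force_global ? pool_global : pool_per_cpu;
	printf("%s\n", pick_pool(pools, 2)->name);	/* prints "sgv" when forced global */
	return 0;
}

The trade-off is a few NR_CPUS pointer stores at init and deinit time in exchange for a branch-free, allocation-free lookup, and it removes the CONFIG_CPUMASK_OFFSTACK special case entirely.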