Slightly modified patch from Bart Van Assche <bvanassche@acm.org> fixing the build breakage on older (pre-2.6.28) kernels that was introduced by r2317

git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@2324 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2010-10-01 12:09:26 +00:00
parent 45848e74a4
commit 929b4db422
6 changed files with 84 additions and 8 deletions

View File

@@ -3850,7 +3850,7 @@ struct scst_tgt_template iscsi_template = {
.get_scsi_transport_version = iscsi_get_scsi_transport_version,
};
int iscsi_threads_pool_get(const struct cpumask *cpu_mask,
int iscsi_threads_pool_get(const cpumask_t *cpu_mask,
struct iscsi_thread_pool **out_pool)
{
int res;

View File

@@ -78,7 +78,7 @@ struct iscsi_thread_pool {
struct list_head wr_list;
wait_queue_head_t wr_waitQ;
struct cpumask cpu_mask;
cpumask_t cpu_mask;
int thread_pool_ref;
@@ -527,7 +527,7 @@ extern int iscsi_preliminary_complete(struct iscsi_cmnd *req,
struct iscsi_cmnd *orig_req, bool get_data);
extern int set_scst_preliminary_status_rsp(struct iscsi_cmnd *req,
bool get_data, int key, int asc, int ascq);
extern int iscsi_threads_pool_get(const struct cpumask *cpu_mask,
extern int iscsi_threads_pool_get(const cpumask_t *cpu_mask,
struct iscsi_thread_pool **out_pool);
extern void iscsi_threads_pool_put(struct iscsi_thread_pool *p);

View File

@@ -26,6 +26,7 @@
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/cpumask.h>
/* #define CONFIG_SCST_PROC */
@@ -59,6 +60,76 @@ typedef _Bool bool;
#include "scst_sgv.h"
#endif
/* Kernels up to and including 2.6.20 have no nr_cpu_ids; fall back to
 * the compile-time CPU limit NR_CPUS on those kernels. */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 20)
#define nr_cpu_ids NR_CPUS
#endif /* <= KERNEL_VERSION(2, 6, 20) */
/* Compatibility backport of the cpumask_*() API for kernels older than
 * 2.6.28, where these helpers are not provided by <linux/cpumask.h>.
 * NOTE(review): the bodies appear to be copied from the mainline kernel
 * implementation — keep them in sync with upstream rather than editing. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
/* Access the raw bitmap backing a cpumask_t. */
#define cpumask_bits(maskp) ((maskp)->bits)
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits nr_cpu_ids
#else
#define nr_cpumask_bits NR_CPUS
#endif
/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
WARN_ON_ONCE(cpu >= nr_cpumask_bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
return cpu;
}
/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
static inline unsigned int cpumask_next(int n, const cpumask_t *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask) \
for ((cpu) = -1; \
(cpu) = cpumask_next((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(cpumask_t *dstp,
const cpumask_t *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) */
/* Kernels older than 2.6.26 lack set_cpus_allowed_ptr(); emulate it
 * with set_cpus_allowed(), which takes the cpumask by value instead of
 * by pointer. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static inline int set_cpus_allowed_ptr(struct task_struct *p,
const cpumask_t *new_mask)
{
return set_cpus_allowed(p, *new_mask);
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) */
/*
* Version numbers, the same as for the kernel.
*
@@ -2396,7 +2467,7 @@ struct scst_acg {
int acg_io_grouping_type;
/* CPU affinity for threads in this ACG */
struct cpumask acg_cpu_mask;
cpumask_t acg_cpu_mask;
unsigned int tgt_acg:1;
@@ -3895,7 +3966,7 @@ struct scst_sysfs_work_item {
bool is_tgt_kobj;
int io_grouping_type;
bool enable;
struct cpumask cpu_mask;
cpumask_t cpu_mask;
};
};
struct {

View File

@@ -158,7 +158,7 @@ static int suspend_count;
static int scst_virt_dev_last_id; /* protected by scst_mutex */
struct cpumask default_cpu_mask;
cpumask_t default_cpu_mask;
static unsigned int scst_max_cmd_mem;
unsigned int scst_max_dev_cmd_mem;

View File

@@ -193,7 +193,7 @@ extern spinlock_t scst_mgmt_lock;
extern struct list_head scst_sess_init_list;
extern struct list_head scst_sess_shut_list;
extern struct cpumask default_cpu_mask;
extern cpumask_t default_cpu_mask;
struct scst_cmd_thread_t {
struct task_struct *cmd_thread;

View File

@@ -3176,8 +3176,13 @@ static ssize_t __scst_acg_cpu_mask_show(struct scst_acg *acg, char *buf)
{
int res;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
res = cpumask_scnprintf(buf, SCST_SYSFS_BLOCK_SIZE,
acg->acg_cpu_mask);
#else
res = cpumask_scnprintf(buf, SCST_SYSFS_BLOCK_SIZE,
&acg->acg_cpu_mask);
#endif
if (!cpus_equal(acg->acg_cpu_mask, default_cpu_mask))
res += sprintf(&buf[res], "\n%s\n", SCST_SYSFS_KEY_MARK);
@@ -3185,7 +3190,7 @@ static ssize_t __scst_acg_cpu_mask_show(struct scst_acg *acg, char *buf)
}
static int __scst_acg_process_cpu_mask_store(struct scst_tgt *tgt,
struct scst_acg *acg, struct cpumask *cpu_mask)
struct scst_acg *acg, cpumask_t *cpu_mask)
{
int res = 0;
struct scst_session *sess;