scst: avoid false lockdep complaints about recursive locking

In some cases SCST needs to take multiple locks recursively, e.g. to lock
all tgt_devs in a session. For this case SCST takes those locks in their
current sort order, e.g. by LUN for tgt_devs, then releases in the
opposite order. Unfortunately, lockdep reports such actions as
recursive locking, then disables itself. The self-disabling is the
most unpleasant part, since it makes lockdep useless from that point on.

Unfortunately, nested locking annotations can't help, because after a
free followed by a new allocation, or after a LUN change (in the case of
tgt_devs), the order of the locks can change.

So, this patch implements a way to annotate some lock and unlock actions
as "no lockdep", so lockdep will not track them. It contains related
kernel patches for some kernels.

This is a debug aid, useful only with lockdep-enabled kernels.



git-svn-id: http://svn.code.sf.net/p/scst/svn/trunk@7065 d57e44dd-8a1f-0410-8b47-8ef2f437770f
This commit is contained in:
Vladislav Bolkhovitin
2016-12-20 05:47:15 +00:00
parent 122a343343
commit 6c95c681ac
17 changed files with 1640 additions and 6 deletions

View File

@@ -73,6 +73,26 @@
#include <scst_const.h>
#endif
/*
 * "No lockdep" lock/unlock wrappers.
 *
 * NOLOCKDEP_SUPPORTED is defined by the accompanying kernel patches
 * (in include/linux/sched.h) when the running kernel carries the
 * nolockdep_call task flag.  Each wrapper sets current->nolockdep_call
 * around the real lock operation so that lock_acquire()/lock_release()
 * return early and lockdep does not track the operation.  This avoids
 * false "recursive locking" reports (and lockdep self-disabling) when
 * SCST takes many same-class locks in sorted order, e.g. all tgt_dev
 * locks of a session ordered by LUN.
 *
 * NOTE(review): the flag is cleared unconditionally after the call, so
 * these wrappers must not be nested — the inner one would re-enable
 * lockdep tracking for the remainder of the outer critical section.
 */
#ifdef NOLOCKDEP_SUPPORTED
#define spin_lock_nolockdep(lock) do { current->nolockdep_call = 1; spin_lock(lock); current->nolockdep_call = 0; } while (0)
#define spin_unlock_nolockdep(lock) do { current->nolockdep_call = 1; spin_unlock(lock); current->nolockdep_call = 0; } while (0)
#define mutex_lock_nolockdep(lock) do { current->nolockdep_call = 1; mutex_lock(lock); current->nolockdep_call = 0; } while (0)
#define mutex_unlock_nolockdep(lock) do { current->nolockdep_call = 1; mutex_unlock(lock); current->nolockdep_call = 0; } while (0)
#define down_read_nolockdep(lock) do { current->nolockdep_call = 1; down_read(lock); current->nolockdep_call = 0; } while (0)
#define up_read_nolockdep(lock) do { current->nolockdep_call = 1; up_read(lock); current->nolockdep_call = 0; } while (0)
#define down_write_nolockdep(lock) do { current->nolockdep_call = 1; down_write(lock); current->nolockdep_call = 0; } while (0)
#define up_write_nolockdep(lock) do { current->nolockdep_call = 1; up_write(lock); current->nolockdep_call = 0; } while (0)
#else
/*
 * Unpatched (or non-lockdep) kernel: the _nolockdep variants fall back
 * to the plain lock primitives, so callers need no #ifdefs.
 */
#define spin_lock_nolockdep spin_lock
#define spin_unlock_nolockdep spin_unlock
#define mutex_lock_nolockdep mutex_lock
#define mutex_unlock_nolockdep mutex_unlock
#define down_read_nolockdep down_read
#define up_read_nolockdep up_read
#define down_write_nolockdep down_write
#define up_write_nolockdep up_write
#endif
#ifdef INSIDE_KERNEL_TREE
#include <scst/scst_sgv.h>
#else

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2013-07-23 02:45:53 +0000
+++ new/include/linux/lockdep.h 2013-07-23 03:31:57 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2013-07-23 02:45:53 +0000
+++ new/include/linux/sched.h 2013-07-23 03:31:57 +0000
@@ -1273,6 +1273,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2013-07-23 02:45:53 +0000
+++ new/kernel/lockdep.c 2013-07-23 03:31:57 +0000
@@ -3593,9 +3593,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3613,6 +3615,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3806,6 +3811,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3826,6 +3834,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2013-07-23 02:45:53 +0000
+++ new/kernel/softirq.c 2013-07-23 03:31:57 +0000
@@ -314,6 +314,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -368,6 +380,18 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2013-09-28 00:14:38 +0000
+++ new/include/linux/lockdep.h 2013-09-28 03:00:19 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2013-09-28 00:14:38 +0000
+++ new/include/linux/sched.h 2013-09-28 03:00:19 +0000
@@ -1266,6 +1266,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2013-09-28 00:14:38 +0000
+++ new/kernel/lockdep.c 2013-09-28 03:00:19 +0000
@@ -3593,9 +3593,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3613,6 +3615,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3806,6 +3811,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3826,6 +3834,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2013-09-28 00:14:38 +0000
+++ new/kernel/softirq.c 2013-09-28 03:00:19 +0000
@@ -312,6 +312,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -366,6 +378,18 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2013-11-30 00:34:22 +0000
+++ new/include/linux/lockdep.h 2013-11-30 00:57:33 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2013-11-30 00:34:22 +0000
+++ new/include/linux/sched.h 2013-11-30 00:57:33 +0000
@@ -1265,6 +1265,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2013-11-30 00:34:22 +0000
+++ new/kernel/lockdep.c 2013-11-30 00:57:33 +0000
@@ -3593,9 +3593,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3613,6 +3615,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3806,6 +3811,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3826,6 +3834,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2013-11-30 00:34:22 +0000
+++ new/kernel/softirq.c 2013-11-30 00:57:33 +0000
@@ -312,6 +312,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -375,6 +387,18 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2014-01-30 00:25:53 +0000
+++ new/include/linux/lockdep.h 2014-01-30 01:13:44 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2014-01-30 00:25:53 +0000
+++ new/include/linux/sched.h 2014-01-30 01:13:44 +0000
@@ -1277,6 +1277,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/locking/lockdep.c'
--- old/kernel/locking/lockdep.c 2014-01-30 00:25:53 +0000
+++ new/kernel/locking/lockdep.c 2014-01-30 01:13:44 +0000
@@ -3593,9 +3593,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3613,6 +3615,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3806,6 +3811,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3826,6 +3834,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2014-01-30 00:25:53 +0000
+++ new/kernel/softirq.c 2014-01-30 01:13:44 +0000
@@ -313,6 +313,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -382,6 +394,18 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,114 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2014-04-17 22:02:06 +0000
+++ new/include/linux/lockdep.h 2014-04-17 22:55:34 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2014-04-17 22:02:06 +0000
+++ new/include/linux/sched.h 2014-04-17 22:55:34 +0000
@@ -1404,6 +1404,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/locking/lockdep.c'
--- old/kernel/locking/lockdep.c 2014-04-17 22:02:06 +0000
+++ new/kernel/locking/lockdep.c 2014-04-17 22:55:34 +0000
@@ -3595,9 +3595,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3615,6 +3617,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3808,6 +3813,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3828,6 +3836,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2014-04-17 22:02:06 +0000
+++ new/kernel/softirq.c 2014-04-17 22:55:34 +0000
@@ -321,6 +321,17 @@ asmlinkage void do_softirq(void)
*/
void irq_enter(void)
{
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -389,6 +400,17 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
trace_hardirq_exit(); /* must be last! */
}

View File

@@ -0,0 +1,114 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2014-06-18 01:32:48 +0000
+++ new/include/linux/lockdep.h 2014-06-18 01:45:33 +0000
@@ -354,7 +354,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2014-06-18 01:32:48 +0000
+++ new/include/linux/sched.h 2014-06-18 01:45:33 +0000
@@ -1422,6 +1422,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/locking/lockdep.c'
--- old/kernel/locking/lockdep.c 2014-06-18 01:32:48 +0000
+++ new/kernel/locking/lockdep.c 2014-06-18 01:45:33 +0000
@@ -3592,9 +3592,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3612,6 +3614,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3805,6 +3810,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3825,6 +3833,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2014-06-18 01:32:48 +0000
+++ new/kernel/softirq.c 2014-06-18 01:45:33 +0000
@@ -322,6 +322,17 @@ asmlinkage __visible void do_softirq(voi
*/
void irq_enter(void)
{
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -390,6 +401,17 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
trace_hardirq_exit(); /* must be last! */
}

View File

@@ -0,0 +1,114 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2014-08-19 01:00:36 +0000
+++ new/include/linux/lockdep.h 2014-08-19 01:18:25 +0000
@@ -354,7 +354,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2014-08-19 01:00:36 +0000
+++ new/include/linux/sched.h 2014-08-19 01:18:25 +0000
@@ -1467,6 +1467,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/locking/lockdep.c'
--- old/kernel/locking/lockdep.c 2014-08-19 01:00:36 +0000
+++ new/kernel/locking/lockdep.c 2014-08-19 01:18:25 +0000
@@ -3592,9 +3592,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3612,6 +3614,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3805,6 +3810,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3825,6 +3833,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2014-08-19 01:00:36 +0000
+++ new/kernel/softirq.c 2014-08-19 01:18:25 +0000
@@ -320,6 +320,17 @@ asmlinkage __visible void do_softirq(voi
*/
void irq_enter(void)
{
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -388,6 +399,17 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
trace_hardirq_exit(); /* must be last! */
}

View File

@@ -0,0 +1,114 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2014-11-21 03:17:49 +0000
+++ new/include/linux/lockdep.h 2014-11-21 03:51:56 +0000
@@ -354,7 +354,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2014-11-21 03:17:49 +0000
+++ new/include/linux/sched.h 2014-11-21 03:51:56 +0000
@@ -1462,6 +1462,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/locking/lockdep.c'
--- old/kernel/locking/lockdep.c 2014-11-21 03:17:49 +0000
+++ new/kernel/locking/lockdep.c 2014-11-21 03:51:56 +0000
@@ -3594,9 +3594,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3614,6 +3616,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3807,6 +3812,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3827,6 +3835,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2014-11-21 03:17:49 +0000
+++ new/kernel/softirq.c 2014-11-21 03:51:56 +0000
@@ -320,6 +320,17 @@ asmlinkage __visible void do_softirq(voi
*/
void irq_enter(void)
{
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -388,6 +399,17 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
trace_hardirq_exit(); /* must be last! */
}

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2012-08-08 02:57:29 +0000
+++ new/include/linux/lockdep.h 2012-08-28 21:26:26 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2012-08-08 02:57:29 +0000
+++ new/include/linux/sched.h 2012-08-28 22:49:36 +0000
@@ -1454,6 +1454,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2012-08-08 02:57:29 +0000
+++ new/kernel/lockdep.c 2012-08-28 23:02:47 +0000
@@ -3547,9 +3547,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3567,6 +3569,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3760,6 +3765,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3780,6 +3788,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2012-08-08 02:57:29 +0000
+++ new/kernel/softirq.c 2012-08-28 23:23:07 +0000
@@ -296,6 +296,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -344,6 +356,18 @@ void irq_exit(void)
#endif
rcu_irq_exit();
sched_preempt_enable_no_resched();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2012-10-01 18:39:34 +0000
+++ new/include/linux/lockdep.h 2012-10-01 22:12:06 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2012-10-01 18:39:34 +0000
+++ new/include/linux/sched.h 2012-10-01 22:12:06 +0000
@@ -1462,6 +1462,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2012-10-01 18:39:34 +0000
+++ new/kernel/lockdep.c 2012-10-01 22:12:06 +0000
@@ -3547,9 +3547,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3567,6 +3569,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3760,6 +3765,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3780,6 +3788,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2012-10-01 18:39:34 +0000
+++ new/kernel/softirq.c 2012-10-01 22:12:06 +0000
@@ -305,6 +305,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -353,6 +365,18 @@ void irq_exit(void)
#endif
rcu_irq_exit();
sched_preempt_enable_no_resched();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2012-12-17 19:41:04 +0000
+++ new/include/linux/lockdep.h 2012-12-17 23:12:00 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2012-12-17 19:41:04 +0000
+++ new/include/linux/sched.h 2012-12-17 23:12:00 +0000
@@ -1418,6 +1418,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2012-12-17 19:41:04 +0000
+++ new/kernel/lockdep.c 2012-12-17 23:12:00 +0000
@@ -3586,9 +3586,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3606,6 +3608,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3799,6 +3804,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3819,6 +3827,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2012-12-17 19:41:04 +0000
+++ new/kernel/softirq.c 2012-12-17 23:12:00 +0000
@@ -306,6 +306,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -354,6 +366,18 @@ void irq_exit(void)
#endif
rcu_irq_exit();
sched_preempt_enable_no_resched();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2013-02-22 21:12:31 +0000
+++ new/include/linux/lockdep.h 2013-02-23 00:19:37 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2013-02-22 21:12:31 +0000
+++ new/include/linux/sched.h 2013-02-23 00:19:37 +0000
@@ -1466,6 +1466,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2013-02-22 21:12:31 +0000
+++ new/kernel/lockdep.c 2013-02-23 00:19:37 +0000
@@ -3586,9 +3586,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3606,6 +3608,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3799,6 +3804,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3819,6 +3827,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2013-02-22 21:12:31 +0000
+++ new/kernel/softirq.c 2013-02-23 00:19:37 +0000
@@ -306,6 +306,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -354,6 +366,18 @@ void irq_exit(void)
#endif
rcu_irq_exit();
sched_preempt_enable_no_resched();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,116 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2013-05-11 05:39:14 +0000
+++ new/include/linux/lockdep.h 2013-05-18 03:43:23 +0000
@@ -355,7 +355,7 @@ extern void lockdep_set_current_reclaim_
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2013-05-11 05:39:14 +0000
+++ new/include/linux/sched.h 2013-05-18 03:43:23 +0000
@@ -1438,6 +1438,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/lockdep.c'
--- old/kernel/lockdep.c 2013-05-11 05:39:14 +0000
+++ new/kernel/lockdep.c 2013-05-18 03:43:23 +0000
@@ -3591,9 +3591,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3611,6 +3613,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3804,6 +3809,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3824,6 +3832,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2013-05-11 05:39:14 +0000
+++ new/kernel/softirq.c 2013-05-18 03:43:23 +0000
@@ -307,6 +307,18 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
+
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -352,6 +364,18 @@ void irq_exit(void)
tick_nohz_irq_exit();
#endif
rcu_irq_exit();
+
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
}
/*

View File

@@ -0,0 +1,114 @@
=== modified file 'include/linux/lockdep.h'
--- old/include/linux/lockdep.h 2016-06-17 23:23:35 +0000
+++ new/include/linux/lockdep.h 2016-06-17 23:38:32 +0000
@@ -359,7 +359,7 @@ extern void lockdep_trace_alloc(gfp_t ma
extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, .nolockdep_call = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
=== modified file 'include/linux/sched.h'
--- old/include/linux/sched.h 2016-06-17 23:23:35 +0000
+++ new/include/linux/sched.h 2016-06-17 23:38:32 +0000
@@ -1649,6 +1649,9 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
+# define NOLOCKDEP_SUPPORTED 1
+ unsigned int nolockdep_call:1;
+ unsigned int nolockdep_call_irq_saved:1;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
=== modified file 'kernel/locking/lockdep.c'
--- old/kernel/locking/lockdep.c 2016-06-17 23:23:35 +0000
+++ new/kernel/locking/lockdep.c 2016-06-17 23:38:32 +0000
@@ -3700,9 +3700,11 @@ void lock_acquire(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
-
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
@@ -3720,6 +3722,9 @@ void lock_release(struct lockdep_map *lo
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3948,6 +3953,9 @@ void lock_contended(struct lockdep_map *
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
@@ -3968,6 +3976,9 @@ void lock_acquired(struct lockdep_map *l
if (unlikely(current->lockdep_recursion))
return;
+ if (unlikely(current->nolockdep_call))
+ return;
+
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
=== modified file 'kernel/softirq.c'
--- old/kernel/softirq.c 2016-06-17 23:23:35 +0000
+++ new/kernel/softirq.c 2016-06-17 23:38:32 +0000
@@ -324,6 +324,17 @@ asmlinkage __visible void do_softirq(voi
*/
void irq_enter(void)
{
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call) {
+ current->nolockdep_call_irq_saved = 1;
+ current->nolockdep_call = 0;
+ }
+ local_irq_restore(flags);
+ }
+#endif
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -392,6 +403,17 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
+#ifdef CONFIG_LOCKDEP
+ if (unlikely(current->nolockdep_call_irq_saved)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (current->nolockdep_call_irq_saved) {
+ current->nolockdep_call_irq_saved = 0;
+ current->nolockdep_call = 1;
+ }
+ local_irq_restore(flags);
+ }
+#endif
trace_hardirq_exit(); /* must be last! */
}

View File

@@ -1788,7 +1788,7 @@ bool scst_cm_check_block_all_devs(struct scst_cmd *cmd)
#if !defined(__CHECKER__)
list_for_each_entry(e, &d->cm_sorted_devs_list, cm_sorted_devs_list_entry) {
spin_lock(&e->cm_fcmd->dev->dev_lock);
spin_lock_nolockdep(&e->cm_fcmd->dev->dev_lock);
}
#endif
@@ -1817,7 +1817,7 @@ bool scst_cm_check_block_all_devs(struct scst_cmd *cmd)
#if !defined(__CHECKER__)
list_for_each_entry_reverse(e, &d->cm_sorted_devs_list,
cm_sorted_devs_list_entry) {
spin_unlock(&e->cm_fcmd->dev->dev_lock);
spin_unlock_nolockdep(&e->cm_fcmd->dev->dev_lock);
}
#endif

View File

@@ -2644,7 +2644,7 @@ static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
list_for_each_entry(tgt_dev, head,
sess_tgt_dev_list_entry) {
/* Lockdep triggers here a false positive.. */
spin_lock(&tgt_dev->tgt_dev_lock);
spin_lock_nolockdep(&tgt_dev->tgt_dev_lock);
}
}
#endif
@@ -2674,7 +2674,7 @@ static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
list_for_each_entry_reverse(tgt_dev, head,
sess_tgt_dev_list_entry) {
spin_unlock(&tgt_dev->tgt_dev_lock);
spin_unlock_nolockdep(&tgt_dev->tgt_dev_lock);
}
}
#endif
@@ -12568,7 +12568,7 @@ again:
list_for_each_entry(tgt_dev, head,
sess_tgt_dev_list_entry) {
/* Lockdep triggers here a false positive.. */
spin_lock(&tgt_dev->tgt_dev_lock);
spin_lock_nolockdep(&tgt_dev->tgt_dev_lock);
}
}
#endif
@@ -12642,7 +12642,7 @@ out_unlock:
list_for_each_entry_reverse(tgt_dev, head,
sess_tgt_dev_list_entry) {
spin_unlock(&tgt_dev->tgt_dev_lock);
spin_unlock_nolockdep(&tgt_dev->tgt_dev_lock);
}
}