On 2020-03-16 15:57:38 [+0800], kbuild test robot wrote:
This (including the reports so far) is due to CONFIG_TRACE_IRQFLAGS=n
and CONFIG_LOCKDEP=y. I intend to move check_wait_context() under
CONFIG_PROVE_LOCKING:
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index df74531fd9f85..b2e9062bec1d6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3686,126 +3686,6 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
return ret;
}
-#else /* CONFIG_PROVE_LOCKING */
-
-static inline int
-mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
-{
- return 1;
-}
-
-static inline unsigned int task_irq_context(struct task_struct *task)
-{
- return 0;
-}
-
-static inline int separate_irq_context(struct task_struct *curr,
- struct held_lock *hlock)
-{
- return 0;
-}
-
-#endif /* CONFIG_PROVE_LOCKING */
-
-/*
- * Initialize a lock instance's lock-class mapping info:
- */
-void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass,
- short inner, short outer)
-{
- int i;
-
- for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
- lock->class_cache[i] = NULL;
-
-#ifdef CONFIG_LOCK_STAT
- lock->cpu = raw_smp_processor_id();
-#endif
-
- /*
- * Can't be having no nameless bastards around this place!
- */
- if (DEBUG_LOCKS_WARN_ON(!name)) {
- lock->name = "NULL";
- return;
- }
-
- lock->name = name;
-
- lock->wait_type_outer = outer;
- lock->wait_type_inner = inner;
-
- /*
- * No key, no joy, we need to hash something.
- */
- if (DEBUG_LOCKS_WARN_ON(!key))
- return;
- /*
- * Sanity check, the lock-class key must either have been allocated
- * statically or must have been registered as a dynamic key.
- */
- if (!static_obj(key) && !is_dynamic_key(key)) {
- if (debug_locks)
- printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
- DEBUG_LOCKS_WARN_ON(1);
- return;
- }
- lock->key = key;
-
- if (unlikely(!debug_locks))
- return;
-
- if (subclass) {
- unsigned long flags;
-
- if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
- return;
-
- raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
- register_lock_class(lock, subclass, 1);
- current->lockdep_recursion = 0;
- raw_local_irq_restore(flags);
- }
-}
-EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
-
-struct lock_class_key __lockdep_no_validate__;
-EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
-
-static void
-print_lock_nested_lock_not_held(struct task_struct *curr,
- struct held_lock *hlock,
- unsigned long ip)
-{
- if (!debug_locks_off())
- return;
- if (debug_locks_silent)
- return;
-
- pr_warn("\n");
- pr_warn("==================================\n");
- pr_warn("WARNING: Nested lock was not taken\n");
- print_kernel_ident();
- pr_warn("----------------------------------\n");
-
- pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
- print_lock(hlock);
-
- pr_warn("\nbut this task is not holding:\n");
- pr_warn("%s\n", hlock->nest_lock->name);
-
- pr_warn("\nstack backtrace:\n");
- dump_stack();
-
- pr_warn("\nother info that might help us debug this:\n");
- lockdep_print_held_locks(curr);
-
- pr_warn("\nstack backtrace:\n");
- dump_stack();
-}
-
static int
print_lock_invalid_wait_context(struct task_struct *curr,
struct held_lock *hlock)
@@ -3913,6 +3793,132 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 	return 0;
 }
+#else /* CONFIG_PROVE_LOCKING */
+
+static inline int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
+{
+ return 1;
+}
+
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 0;
+}
+
+static inline int separate_irq_context(struct task_struct *curr,
+ struct held_lock *hlock)
+{
+ return 0;
+}
+
+static inline int check_wait_context(struct task_struct *curr,
+ struct held_lock *next)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PROVE_LOCKING */
+
+/*
+ * Initialize a lock instance's lock-class mapping info:
+ */
+void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass,
+ short inner, short outer)
+{
+ int i;
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
+
+#ifdef CONFIG_LOCK_STAT
+ lock->cpu = raw_smp_processor_id();
+#endif
+
+ /*
+ * Can't be having no nameless bastards around this place!
+ */
+ if (DEBUG_LOCKS_WARN_ON(!name)) {
+ lock->name = "NULL";
+ return;
+ }
+
+ lock->name = name;
+
+ lock->wait_type_outer = outer;
+ lock->wait_type_inner = inner;
+
+ /*
+ * No key, no joy, we need to hash something.
+ */
+ if (DEBUG_LOCKS_WARN_ON(!key))
+ return;
+ /*
+ * Sanity check, the lock-class key must either have been allocated
+ * statically or must have been registered as a dynamic key.
+ */
+ if (!static_obj(key) && !is_dynamic_key(key)) {
+ if (debug_locks)
+ printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
+ DEBUG_LOCKS_WARN_ON(1);
+ return;
+ }
+ lock->key = key;
+
+ if (unlikely(!debug_locks))
+ return;
+
+ if (subclass) {
+ unsigned long flags;
+
+ if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
+ register_lock_class(lock, subclass, 1);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
+
+struct lock_class_key __lockdep_no_validate__;
+EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
+
+static void
+print_lock_nested_lock_not_held(struct task_struct *curr,
+ struct held_lock *hlock,
+ unsigned long ip)
+{
+ if (!debug_locks_off())
+ return;
+ if (debug_locks_silent)
+ return;
+
+ pr_warn("\n");
+ pr_warn("==================================\n");
+ pr_warn("WARNING: Nested lock was not taken\n");
+ print_kernel_ident();
+ pr_warn("----------------------------------\n");
+
+ pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
+ print_lock(hlock);
+
+ pr_warn("\nbut this task is not holding:\n");
+ pr_warn("%s\n", hlock->nest_lock->name);
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
+
+ pr_warn("\nother info that might help us debug this:\n");
+ lockdep_print_held_locks(curr);
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
+}
+
static int __lock_is_held(const struct lockdep_map *lock, int read);
/*
Sebastian