tree:
https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-5.4.y-rt
head: 5fbf1e70f11dba64cc05c9d85120a3aa7c67a4a2
commit: 17a62333ac5d375f3cd66b92b586480d1aeb1f48 [140/325] rtmutex: Handle the various new
futex race conditions
config: i386-randconfig-m021-20200810 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
smatch warnings:
kernel/locking/rtmutex.c:973 task_blocks_on_rt_mutex() warn: inconsistent indenting
vim +973 kernel/locking/rtmutex.c
926
927 /*
928 * Task blocks on lock.
929 *
930 * Prepare waiter and propagate pi chain
931 *
932 * This must be called with lock->wait_lock held and interrupts disabled
933 */
934 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
935 struct rt_mutex_waiter *waiter,
936 struct task_struct *task,
937 enum rtmutex_chainwalk chwalk)
938 {
939 struct task_struct *owner = rt_mutex_owner(lock);
940 struct rt_mutex_waiter *top_waiter = waiter;
941 struct rt_mutex *next_lock;
942 int chain_walk = 0, res;
943
944 lockdep_assert_held(&lock->wait_lock);
945
946 /*
947 * Early deadlock detection. We really don't want the task to
948 * enqueue on itself just to untangle the mess later. It's not
949 * only an optimization. We drop the locks, so another waiter
950 * can come in before the chain walk detects the deadlock. So
951 * the other will detect the deadlock and return -EDEADLOCK,
952 * which is wrong, as the other waiter is not in a deadlock
953 * situation.
954 */
955 if (owner == task)
956 return -EDEADLK;
957
958 raw_spin_lock(&task->pi_lock);
959 /*
960 * In the case of futex requeue PI, this will be a proxy
961 * lock. The task will wake unaware that it is enqueueed on
962 * this lock. Avoid blocking on two locks and corrupting
963 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
964 * flag. futex_wait_requeue_pi() sets this when it wakes up
965 * before requeue (due to a signal or timeout). Do not enqueue
966 * the task if PI_WAKEUP_INPROGRESS is set.
967 */
968 if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
969 raw_spin_unlock(&task->pi_lock);
970 return -EAGAIN;
971 }
972
973 BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
974
975 waiter->task = task;
976 waiter->lock = lock;
977 waiter->prio = task->prio;
978 waiter->deadline = task->dl.deadline;
979
980 /* Get the top priority waiter on the lock */
981 if (rt_mutex_has_waiters(lock))
982 top_waiter = rt_mutex_top_waiter(lock);
983 rt_mutex_enqueue(lock, waiter);
984
985 task->pi_blocked_on = waiter;
986
987 raw_spin_unlock(&task->pi_lock);
988
989 if (!owner)
990 return 0;
991
992 raw_spin_lock(&owner->pi_lock);
993 if (waiter == rt_mutex_top_waiter(lock)) {
994 rt_mutex_dequeue_pi(owner, top_waiter);
995 rt_mutex_enqueue_pi(owner, waiter);
996
997 rt_mutex_adjust_prio(owner);
998 if (rt_mutex_real_waiter(owner->pi_blocked_on))
999 chain_walk = 1;
1000 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1001 chain_walk = 1;
1002 }
1003
1004 /* Store the lock on which owner is blocked or NULL */
1005 next_lock = task_blocked_on_lock(owner);
1006
1007 raw_spin_unlock(&owner->pi_lock);
1008 /*
1009 * Even if full deadlock detection is on, if the owner is not
1010 * blocked itself, we can avoid finding this out in the chain
1011 * walk.
1012 */
1013 if (!chain_walk || !next_lock)
1014 return 0;
1015
1016 /*
1017 * The owner can't disappear while holding a lock,
1018 * so the owner struct is protected by wait_lock.
1019 * Gets dropped in rt_mutex_adjust_prio_chain()!
1020 */
1021 get_task_struct(owner);
1022
1023 raw_spin_unlock_irq(&lock->wait_lock);
1024
1025 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1026 next_lock, waiter, task);
1027
1028 raw_spin_lock_irq(&lock->wait_lock);
1029
1030 return res;
1031 }
1032
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org