Hi Waiman,
[FYI, it's a private test report for your RFC patch.]
[auto build test ERROR on tip/locking/core]
[also build test ERROR on tip/master arm-perf/for-next/perf v5.10-rc4 next-20201118]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/Waiman-Long/locking-rwsem-Rework...
base:
https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
932f8c64d38bb08f69c8c26a2216ba0c36c6daa8
config: nios2-randconfig-r022-20201118 (attached as .config)
compiler: nios2-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/8ba2157a62de4ca5c3a922faaea7c2b2a...
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Waiman-Long/locking-rwsem-Rework-reader-optimistic-spinning/20201118-110810
        git checkout 8ba2157a62de4ca5c3a922faaea7c2b2ab2ca8a7
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=nios2
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
kernel/locking/rwsem.c: In function 'rwsem_down_write_slowpath':
> kernel/locking/rwsem.c:1016:6: error: too few arguments to
function 'rwsem_can_spin_on_owner'
1016 | if
(rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
| ^~~~~~~~~~~~~~~~~~~~~~~
kernel/locking/rwsem.c:855:20: note: declared here
855 | static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
| ^~~~~~~~~~~~~~~~~~~~~~~
> kernel/locking/rwsem.c:1016:38: error: too few arguments to
function 'rwsem_optimistic_spin'
1016 | if
(rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
| ^~~~~~~~~~~~~~~~~~~~~
kernel/locking/rwsem.c:861:20: note: declared here
861 | static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
| ^~~~~~~~~~~~~~~~~~~~~
vim +/rwsem_can_spin_on_owner +1016 kernel/locking/rwsem.c
1002
1003 /*
1004 * Wait until we successfully acquire the write lock
1005 */
1006 static struct rw_semaphore *
1007 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1008 {
1009 long count;
1010 enum writer_wait_state wstate;
1011 struct rwsem_waiter waiter;
1012 struct rw_semaphore *ret = sem;
1013 DEFINE_WAKE_Q(wake_q);
1014
1015 /* do optimistic spinning and steal lock if possible */
1016 if (rwsem_can_spin_on_owner(sem) &&
rwsem_optimistic_spin(sem)) {
1017 /* rwsem_optimistic_spin() implies ACQUIRE on
success */
1018 return sem;
1019 }
1020
1021 /*
1022 * Optimistic spinning failed, proceed to the slowpath
1023 * and block until we can acquire the sem.
1024 */
1025 waiter.task = current;
1026 waiter.type = RWSEM_WAITING_FOR_WRITE;
1027 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1028
1029 raw_spin_lock_irq(&sem->wait_lock);
1030
1031 /* account for this before adding a new element to the list */
1032 wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1033
1034 list_add_tail(&waiter.list, &sem->wait_list);
1035
1036 /* we're now waiting on the lock */
1037 if (wstate == WRITER_NOT_FIRST) {
1038 count = atomic_long_read(&sem->count);
1039
1040 /*
1041 * If there were already threads queued before us and:
1042 * 1) there are no no active locks, wake the front
1043 * queued process(es) as the handoff bit might be set.
1044 * 2) there are no active writers and some readers, the lock
1045 * must be read owned; so we try to wake any read lock
1046 * waiters that were queued ahead of us.
1047 */
1048 if (count & RWSEM_WRITER_MASK)
1049 goto wait;
1050
1051 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1052 ? RWSEM_WAKE_READERS
1053 : RWSEM_WAKE_ANY, &wake_q);
1054
1055 if (!wake_q_empty(&wake_q)) {
1056 /*
1057 * We want to minimize wait_lock hold time especially
1058 * when a large number of readers are to be woken up.
1059 */
1060 raw_spin_unlock_irq(&sem->wait_lock);
1061 wake_up_q(&wake_q);
1062 wake_q_init(&wake_q); /* Used again, reinit */
1063 raw_spin_lock_irq(&sem->wait_lock);
1064 }
1065 } else {
1066 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1067 }
1068
1069 wait:
1070 /* wait until we successfully acquire the lock */
1071 set_current_state(state);
1072 for (;;) {
1073 if (rwsem_try_write_lock(sem, wstate)) {
1074 /* rwsem_try_write_lock() implies ACQUIRE on success */
1075 break;
1076 }
1077
1078 raw_spin_unlock_irq(&sem->wait_lock);
1079
1080 /*
1081 * After setting the handoff bit and failing to acquire
1082 * the lock, attempt to spin on owner to accelerate lock
1083 * transfer. If the previous owner is a on-cpu writer and it
1084 * has just released the lock, OWNER_NULL will be returned.
1085 * In this case, we attempt to acquire the lock again
1086 * without sleeping.
1087 */
1088 if (wstate == WRITER_HANDOFF &&
1089 rwsem_spin_on_owner(sem) == OWNER_NULL)
1090 goto trylock_again;
1091
1092 /* Block until there are no active lockers. */
1093 for (;;) {
1094 if (signal_pending_state(state, current))
1095 goto out_nolock;
1096
1097 schedule();
1098 lockevent_inc(rwsem_sleep_writer);
1099 set_current_state(state);
1100 /*
1101 * If HANDOFF bit is set, unconditionally do
1102 * a trylock.
1103 */
1104 if (wstate == WRITER_HANDOFF)
1105 break;
1106
1107 if ((wstate == WRITER_NOT_FIRST) &&
1108 (rwsem_first_waiter(sem) == &waiter))
1109 wstate = WRITER_FIRST;
1110
1111 count = atomic_long_read(&sem->count);
1112 if (!(count & RWSEM_LOCK_MASK))
1113 break;
1114
1115 /*
1116 * The setting of the handoff bit is deferred
1117 * until rwsem_try_write_lock() is called.
1118 */
1119 if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1120 time_after(jiffies, waiter.timeout))) {
1121 wstate = WRITER_HANDOFF;
1122 lockevent_inc(rwsem_wlock_handoff);
1123 break;
1124 }
1125 }
1126 trylock_again:
1127 raw_spin_lock_irq(&sem->wait_lock);
1128 }
1129 __set_current_state(TASK_RUNNING);
1130 list_del(&waiter.list);
1131 raw_spin_unlock_irq(&sem->wait_lock);
1132 lockevent_inc(rwsem_wlock);
1133
1134 return ret;
1135
1136 out_nolock:
1137 __set_current_state(TASK_RUNNING);
1138 raw_spin_lock_irq(&sem->wait_lock);
1139 list_del(&waiter.list);
1140
1141 if (unlikely(wstate == WRITER_HANDOFF))
1142 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
1143
1144 if (list_empty(&sem->wait_list))
1145 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1146 else
1147 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1148 raw_spin_unlock_irq(&sem->wait_lock);
1149 wake_up_q(&wake_q);
1150 lockevent_inc(rwsem_wlock_fail);
1151
1152 return ERR_PTR(-EINTR);
1153 }
1154
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org