tree:   https://github.com/ammarfaizi2/linux-block paulmck/linux-rcu/dev.2022.11.18a
head:   7831c47ed19bac29a16088dc2511c9df012ff128
commit: 7831c47ed19bac29a16088dc2511c9df012ff128 [48/48] rcu: Make SRCU mandatory
config: x86_64-randconfig-a013
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/ammarfaizi2/linux-block/commit/7831c47ed19bac29a16088dc2511c9df012ff128
        git remote add ammarfaizi2-block https://github.com/ammarfaizi2/linux-block
        git fetch --no-tags ammarfaizi2-block paulmck/linux-rcu/dev.2022.11.18a
        git checkout 7831c47ed19bac29a16088dc2511c9df012ff128
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot

All errors (new ones prefixed by >>):

   In file included from kernel/rcu/update.c:605:
   kernel/rcu/tasks.h: In function 'cblist_init_generic':
>> kernel/rcu/tasks.h:256:17: error: implicit declaration of function 'raw_spin_lock_rcu_node'; did you mean 'raw_spin_lock_init'? [-Werror=implicit-function-declaration]
      256 |                 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
          |                 ^~~~~~~~~~~~~~~~~~~~~~
          |                 raw_spin_lock_init
>> kernel/rcu/tasks.h:264:17: error: implicit declaration of function 'raw_spin_unlock_rcu_node'; did you mean 'raw_spin_unlock_irqrestore'? [-Werror=implicit-function-declaration]
      264 |                 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
          |                 ^~~~~~~~~~~~~~~~~~~~~~~~
          |                 raw_spin_unlock_irqrestore
   In file included from kernel/rcu/update.c:605:
   kernel/rcu/tasks.h: In function 'call_rcu_tasks_generic':
>> kernel/rcu/tasks.h:299:14: error: implicit declaration of function 'raw_spin_trylock_rcu_node'; did you mean 'raw_spin_trylock_irqsave'? [-Werror=implicit-function-declaration]
      299 |         if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
          |              ^~~~~~~~~~~~~~~~~~~~~~~~~
          |              raw_spin_trylock_irqsave
>> kernel/rcu/tasks.h:317:9: error: implicit declaration of function 'raw_spin_unlock_irqrestore_rcu_node'; did you mean 'raw_spin_unlock_irqrestore'? [-Werror=implicit-function-declaration]
      317 |         raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
          |         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
          |         raw_spin_unlock_irqrestore
   kernel/rcu/tasks.h: In function 'rcu_barrier_tasks_generic':
>> kernel/rcu/tasks.h:369:17: error: implicit declaration of function 'raw_spin_lock_irqsave_rcu_node'; did you mean 'raw_spin_lock_irqsave_nested'? [-Werror=implicit-function-declaration]
      369 |                 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
          |                 ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
          |                 raw_spin_lock_irqsave_nested
   kernel/rcu/tasks.h: In function 'rcu_tasks_trace_pregp_step':
   kernel/rcu/tasks.h:1511:17: warning: 'flags' is used uninitialized [-Wuninitialized]
     1511 |                 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
          |                 ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
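
For context, the five identifiers flagged above are the rcu_node locking
wrappers that kernel/rcu/tasks.h expects to pick up from kernel/rcu/rcu.h.
The sketch below shows roughly what those wrappers look like in mainline; the
exact definitions, and the preprocessor guard around them (which this
randconfig presumably no longer satisfies once the series removes the
CONFIG_SRCU option), may differ in this tree:

   /* Sketch modeled on kernel/rcu/rcu.h; exact definitions and guards may differ. */
   #define raw_spin_lock_rcu_node(p)                                      \
   do {                                                                   \
           raw_spin_lock(&ACCESS_PRIVATE(p, lock));                       \
           smp_mb__after_unlock_lock();                                   \
   } while (0)

   #define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

   #define raw_spin_lock_irqsave_rcu_node(p, flags)                       \
   do {                                                                   \
           raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);        \
           smp_mb__after_unlock_lock();                                   \
   } while (0)

   #define raw_spin_unlock_irqrestore_rcu_node(p, flags)                  \
           raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

   #define raw_spin_trylock_rcu_node(p)                                   \
   ({                                                                     \
           bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));   \
                                                                          \
           if (___locked)                                                 \
                   smp_mb__after_unlock_lock();                           \
           ___locked;                                                     \
   })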

vim +256 kernel/rcu/tasks.h

af051ca4e4231f Paul E. McKenney 2020-03-16  222
cafafd67765b21 Paul E. McKenney 2021-11-05  223  // Initialize per-CPU callback lists for the specified flavor of
cafafd67765b21 Paul E. McKenney 2021-11-05  224  // Tasks RCU.
cafafd67765b21 Paul E. McKenney 2021-11-05  225  static void cblist_init_generic(struct rcu_tasks *rtp)
cafafd67765b21 Paul E. McKenney 2021-11-05  226  {
cafafd67765b21 Paul E. McKenney 2021-11-05  227  	int cpu;
cafafd67765b21 Paul E. McKenney 2021-11-05  228  	unsigned long flags;
8610b65680390a Paul E. McKenney 2021-11-12  229  	int lim;
da123016ca8cb5 Paul E. McKenney 2022-01-26  230  	int shift;
cafafd67765b21 Paul E. McKenney 2021-11-05  231
cafafd67765b21 Paul E. McKenney 2021-11-05  232  	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  233  	if (rcu_task_enqueue_lim < 0) {
ab97152f88a4d5 Paul E. McKenney 2021-11-24  234  		rcu_task_enqueue_lim = 1;
ab97152f88a4d5 Paul E. McKenney 2021-11-24  235  		rcu_task_cb_adjust = true;
ab97152f88a4d5 Paul E. McKenney 2021-11-24  236  		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  237  	} else if (rcu_task_enqueue_lim == 0) {
8610b65680390a Paul E. McKenney 2021-11-12  238  		rcu_task_enqueue_lim = 1;
ab97152f88a4d5 Paul E. McKenney 2021-11-24  239  	}
8610b65680390a Paul E. McKenney 2021-11-12  240  	lim = rcu_task_enqueue_lim;
8610b65680390a Paul E. McKenney 2021-11-12  241
8610b65680390a Paul E. McKenney 2021-11-12  242  	if (lim > nr_cpu_ids)
8610b65680390a Paul E. McKenney 2021-11-12  243  		lim = nr_cpu_ids;
da123016ca8cb5 Paul E. McKenney 2022-01-26  244  	shift = ilog2(nr_cpu_ids / lim);
da123016ca8cb5 Paul E. McKenney 2022-01-26  245  	if (((nr_cpu_ids - 1) >> shift) >= lim)
da123016ca8cb5 Paul E. McKenney 2022-01-26  246  		shift++;
da123016ca8cb5 Paul E. McKenney 2022-01-26  247  	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
2cee0789b458af Paul E. McKenney 2021-11-29  248  	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
8610b65680390a Paul E. McKenney 2021-11-12  249  	smp_store_release(&rtp->percpu_enqueue_lim, lim);
cafafd67765b21 Paul E. McKenney 2021-11-05  250  	for_each_possible_cpu(cpu) {
cafafd67765b21 Paul E. McKenney 2021-11-05  251  		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
cafafd67765b21 Paul E. McKenney 2021-11-05  252
cafafd67765b21 Paul E. McKenney 2021-11-05  253  		WARN_ON_ONCE(!rtpcp);
cafafd67765b21 Paul E. McKenney 2021-11-05  254  		if (cpu)
381a4f3b38603a Paul E. McKenney 2021-11-08  255  			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
381a4f3b38603a Paul E. McKenney 2021-11-08 @256  		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
9b073de1c7a354 Paul E. McKenney 2021-11-08  257  		if (rcu_segcblist_empty(&rtpcp->cblist))
9b073de1c7a354 Paul E. McKenney 2021-11-08  258  			rcu_segcblist_init(&rtpcp->cblist);
d363f833c6d883 Paul E. McKenney 2021-11-10  259  		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
d363f833c6d883 Paul E. McKenney 2021-11-10  260  		rtpcp->cpu = cpu;
d363f833c6d883 Paul E. McKenney 2021-11-10  261  		rtpcp->rtpp = rtp;
434c9eefb959c3 Paul E. McKenney 2022-05-16  262  		if (!rtpcp->rtp_blkd_tasks.next)
434c9eefb959c3 Paul E. McKenney 2022-05-16  263  			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
381a4f3b38603a Paul E. McKenney 2021-11-08 @264  		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
cafafd67765b21 Paul E. McKenney 2021-11-05  265  	}
cafafd67765b21 Paul E. McKenney 2021-11-05  266  	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
8610b65680390a Paul E. McKenney 2021-11-12  267  	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
cafafd67765b21 Paul E. McKenney 2021-11-05  268  }
cafafd67765b21 Paul E. McKenney 2021-11-05  269
3063b33a347c08 Paul E. McKenney 2021-11-23  270  // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
3063b33a347c08 Paul E. McKenney 2021-11-23  271  static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
3063b33a347c08 Paul E. McKenney 2021-11-23  272  {
3063b33a347c08 Paul E. McKenney 2021-11-23  273  	struct rcu_tasks *rtp;
3063b33a347c08 Paul E. McKenney 2021-11-23  274  	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
3063b33a347c08 Paul E. McKenney 2021-11-23  275
3063b33a347c08 Paul E. McKenney 2021-11-23  276  	rtp = rtpcp->rtpp;
88db792bbe9b14 Sebastian Andrzej Siewior 2022-03-04  277  	rcuwait_wake_up(&rtp->cbs_wait);
3063b33a347c08 Paul E. McKenney 2021-11-23  278  }
3063b33a347c08 Paul E. McKenney 2021-11-23  279
5873b8a94e5dae Paul E. McKenney 2020-03-03  280  // Enqueue a callback for the specified flavor of Tasks RCU.
5873b8a94e5dae Paul E. McKenney 2020-03-03  281  static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
5873b8a94e5dae Paul E. McKenney 2020-03-03  282  				   struct rcu_tasks *rtp)
eacd6f04a13331 Paul E. McKenney 2020-03-02  283  {
07d95c34e8125a Eric Dumazet 2022-04-04  284  	int chosen_cpu;
eacd6f04a13331 Paul E. McKenney 2020-03-02  285  	unsigned long flags;
07d95c34e8125a Eric Dumazet 2022-04-04  286  	int ideal_cpu;
7d13d30bb6c54b Paul E. McKenney 2021-11-22  287  	unsigned long j;
ab97152f88a4d5 Paul E. McKenney 2021-11-24  288  	bool needadjust = false;
eacd6f04a13331 Paul E. McKenney 2020-03-02  289  	bool needwake;
cafafd67765b21 Paul E. McKenney 2021-11-05  290  	struct rcu_tasks_percpu *rtpcp;
eacd6f04a13331 Paul E. McKenney 2020-03-02  291
eacd6f04a13331 Paul E. McKenney 2020-03-02  292  	rhp->next = NULL;
eacd6f04a13331 Paul E. McKenney 2020-03-02  293  	rhp->func = func;
cafafd67765b21 Paul E. McKenney 2021-11-05  294  	local_irq_save(flags);
fd796e4139b481 Paul E. McKenney 2021-11-29  295  	rcu_read_lock();
07d95c34e8125a Eric Dumazet 2022-04-04  296  	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
07d95c34e8125a Eric Dumazet 2022-04-04  297  	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
07d95c34e8125a Eric Dumazet 2022-04-04  298  	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
7d13d30bb6c54b Paul E. McKenney 2021-11-22 @299  	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
381a4f3b38603a Paul E. McKenney 2021-11-08  300  		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
7d13d30bb6c54b Paul E. McKenney 2021-11-22  301  		j = jiffies;
7d13d30bb6c54b Paul E. McKenney 2021-11-22  302  		if (rtpcp->rtp_jiffies != j) {
7d13d30bb6c54b Paul E. McKenney 2021-11-22  303  			rtpcp->rtp_jiffies = j;
7d13d30bb6c54b Paul E. McKenney 2021-11-22  304  			rtpcp->rtp_n_lock_retries = 0;
7d13d30bb6c54b Paul E. McKenney 2021-11-22  305  		}
ab97152f88a4d5 Paul E. McKenney 2021-11-24  306  		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
ab97152f88a4d5 Paul E. McKenney 2021-11-24  307  		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
ab97152f88a4d5 Paul E. McKenney 2021-11-24  308  			needadjust = true; // Defer adjustment to avoid deadlock.
7d13d30bb6c54b Paul E. McKenney 2021-11-22  309  	}
9b073de1c7a354 Paul E. McKenney 2021-11-08  310  	if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
381a4f3b38603a Paul E. McKenney 2021-11-08  311  		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
cafafd67765b21 Paul E. McKenney 2021-11-05  312  		cblist_init_generic(rtp);
381a4f3b38603a Paul E. McKenney 2021-11-08  313  		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
cafafd67765b21 Paul E. McKenney 2021-11-05  314  	}
9b073de1c7a354 Paul E. McKenney 2021-11-08  315  	needwake = rcu_segcblist_empty(&rtpcp->cblist);
9b073de1c7a354 Paul E. McKenney 2021-11-08  316  	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
381a4f3b38603a Paul E. McKenney 2021-11-08 @317  	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  318  	if (unlikely(needadjust)) {
ab97152f88a4d5 Paul E. McKenney 2021-11-24  319  		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  320  		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
00a8b4b54cd69d Paul E. McKenney 2022-02-02  321  			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
fd796e4139b481 Paul E. McKenney 2021-11-29  322  			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  323  			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  324  			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  325  		}
ab97152f88a4d5 Paul E. McKenney 2021-11-24  326  		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
ab97152f88a4d5 Paul E. McKenney 2021-11-24  327  	}
fd796e4139b481 Paul E. McKenney 2021-11-29  328  	rcu_read_unlock();
eacd6f04a13331 Paul E. McKenney 2020-03-02  329  	/* We can't create the thread unless interrupts are enabled. */
07e105158d97b4 Paul E. McKenney 2020-03-02  330  	if (needwake && READ_ONCE(rtp->kthread_ptr))
3063b33a347c08 Paul E. McKenney 2021-11-23  331  		irq_work_queue(&rtpcp->rtp_irq_work);
eacd6f04a13331 Paul E. McKenney 2020-03-02  332  }
eacd6f04a13331 Paul E. McKenney 2020-03-02  333
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  334  // RCU callback function for rcu_barrier_tasks_generic().
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  335  static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  336  {
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  337  	struct rcu_tasks *rtp;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  338  	struct rcu_tasks_percpu *rtpcp;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  339
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  340  	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  341  	rtp = rtpcp->rtpp;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  342  	if (atomic_dec_and_test(&rtp->barrier_q_count))
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  343  		complete(&rtp->barrier_q_completion);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  344  }
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  345
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  346  // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  347  // Operates in a manner similar to rcu_barrier().
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  348  static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  349  {
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  350  	int cpu;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  351  	unsigned long flags;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  352  	struct rcu_tasks_percpu *rtpcp;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  353  	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  354
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  355  	mutex_lock(&rtp->barrier_q_mutex);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  356  	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  357  		smp_mb();
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  358  		mutex_unlock(&rtp->barrier_q_mutex);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  359  		return;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  360  	}
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  361  	rcu_seq_start(&rtp->barrier_q_seq);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  362  	init_completion(&rtp->barrier_q_completion);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  363  	atomic_set(&rtp->barrier_q_count, 2);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  364  	for_each_possible_cpu(cpu) {
2cee0789b458af Paul E. McKenney 2021-11-29  365  		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  366  			break;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  367  		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  368  		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
ce9b1c667f03e0 Paul E. McKenney 2021-11-11 @369  		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  370  		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  371  			atomic_inc(&rtp->barrier_q_count);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  372  		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  373  	}
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  374  	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  375  		complete(&rtp->barrier_q_completion);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  376  	wait_for_completion(&rtp->barrier_q_completion);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  377  	rcu_seq_end(&rtp->barrier_q_seq);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  378  	mutex_unlock(&rtp->barrier_q_mutex);
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  379  }
ce9b1c667f03e0 Paul E. McKenney 2021-11-11  380

:::::: The code at line 256 was first introduced by commit
:::::: 381a4f3b38603aab47e5500609d5ec733b5d0ecb rcu-tasks: Use spin_lock_rcu_node() and friends

:::::: TO: Paul E. McKenney
:::::: CC: Paul E. McKenney

--
0-DAY CI Kernel Test Service
https://01.org/lkp