From e62498b3daa94d72c9394a628d48d853f295e450 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Mon, 21 Mar 2022 10:22:37 +0100
Subject: [PATCH] net: Revert the softirq will run annotation in ____napi_schedule().

The lockdep annotation lockdep_assert_softirq_will_run() expects that
either hard or soft interrupts are disabled, because both guarantee that
the "raised" soft-interrupts will be processed once the context is left.

This triggers in flush_smp_call_function_from_idle(), but that function
explicitly calls do_softirq() when softirqs are pending.

Revert the "softirq will run" annotation in ____napi_schedule() and move
the check back to __netif_rx() as it was. Keep the IRQ-off assert in
____napi_schedule() because this is always required.

Fixes: dd30f625f3f88 ("net: Add lockdep asserts to ____napi_schedule().")
Signed-off-by: Sebastian Andrzej Siewior
Reviewed-by: Jason A. Donenfeld
Link: https://lore.kernel.org/r/YjhD3ZKWysyw8rc6@linutronix.de
Signed-off-by: Jakub Kicinski
---
 include/linux/lockdep.h | 7 -------
 net/core/dev.c          | 3 +--
 2 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0cc65d2167015..467b94257105e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -329,12 +329,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_assert_none_held_once()		\
 	lockdep_assert_once(!current->lockdep_depth)
 
-/*
- * Ensure that softirq is handled within the callchain and not delayed and
- * handled by chance.
- */
-#define lockdep_assert_softirq_will_run()	\
-	lockdep_assert_once(hardirq_count() | softirq_count())
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
@@ -420,7 +414,6 @@ extern int lockdep_is_held(const void *);
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 #define lockdep_assert_none_held_once()		do { } while (0)
-#define lockdep_assert_softirq_will_run()	do { } while (0)
 
 #define lockdep_recursing(tsk)			(0)
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e0cc5f2020d3..8a5109479dbe2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4277,7 +4277,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 {
 	struct task_struct *thread;
 
-	lockdep_assert_softirq_will_run();
 	lockdep_assert_irqs_disabled();
 
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
@@ -4887,7 +4886,7 @@ int __netif_rx(struct sk_buff *skb)
 {
 	int ret;
 
-	lockdep_assert_softirq_will_run();
+	lockdep_assert_once(hardirq_count() | softirq_count());
 
 	trace_netif_rx_entry(skb);
 	ret = netif_rx_internal(skb);
-- 
2.39.5
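
For reference, the condition being asserted, hardirq_count() | softirq_count(),
can be modelled outside the kernel. What follows is a minimal standalone
userspace sketch, not kernel code: the counter bit layout mirrors
include/linux/preempt.h, while the simulated preempt_count values for the
three example contexts are assumptions chosen only for illustration. It shows
why plain task or idle context, as in flush_smp_call_function_from_idle(),
fails the check even though that caller processes pending softirqs itself.

/*
 * Standalone userspace sketch (not kernel code) of the condition that this
 * patch moves back into __netif_rx():
 *
 *	lockdep_assert_once(hardirq_count() | softirq_count());
 *
 * The bit layout mirrors include/linux/preempt.h; the example preempt_count
 * values below are illustrative assumptions.
 */
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define SOFTIRQ_MASK	(((1UL << SOFTIRQ_BITS) - 1) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(((1UL << HARDIRQ_BITS) - 1) << HARDIRQ_SHIFT)

#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

static unsigned long preempt_count;	/* stand-in for the real per-task count */

#define hardirq_count()	(preempt_count & HARDIRQ_MASK)
#define softirq_count()	(preempt_count & SOFTIRQ_MASK)

/* The assertion reduced to its condition: non-zero means "softirq will run". */
static int softirq_will_run(void)
{
	return (hardirq_count() | softirq_count()) != 0;
}

int main(void)
{
	/*
	 * Plain task/idle context, e.g. flush_smp_call_function_from_idle():
	 * neither count is elevated, so the reverted annotation fired there
	 * even though that caller runs do_softirq() itself when needed.
	 */
	preempt_count = 0;
	printf("task/idle context:      will run = %d\n", softirq_will_run());

	/* Hard interrupt context: raised softirqs are processed on irq exit. */
	preempt_count = HARDIRQ_OFFSET;
	printf("hardirq context:        will run = %d\n", softirq_will_run());

	/* Softirq/BH-disabled context: they run once that section is left. */
	preempt_count = SOFTIRQ_OFFSET;
	printf("softirq/BH-off context: will run = %d\n", softirq_will_run());

	return 0;
}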