____napi_schedule() needs to be invoked with interrupts disabled due to
__raise_softirq_irqoff() (in order not to corrupt the per-CPU list).
____napi_schedule() also needs to be invoked from interrupt context so
that the raised softirq is processed when the interrupt context is
left.
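For reference, this is how __napi_schedule() in net/core/dev.c satisfies
both requirements: it masks interrupts around the call, and its callers
are expected to run in hardirq, softirq or BH-disabled context so that
the raised softirq is processed on irq_exit() or local_bh_enable():

	void __napi_schedule(struct napi_struct *n)
	{
		unsigned long flags;

		local_irq_save(flags);
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
		local_irq_restore(flags);
	}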
Add lockdep asserts for both conditions.
Since this is the second time the irq/softirq check is needed, provide a
generic lockdep_assert_softirq_will_run() which is used by both callers.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
#define lockdep_assert_none_held_once()		\
	lockdep_assert_once(!current->lockdep_depth)
+/*
+ * Ensure that softirq is handled within the callchain and not delayed and
+ * handled by chance.
+ */
+#define lockdep_assert_softirq_will_run()	\
+	lockdep_assert_once(hardirq_count() | softirq_count())
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)
+#define lockdep_assert_softirq_will_run() do { } while (0)
#define lockdep_recursing(tsk) (0)
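The check relies on hardirq_count() and softirq_count(), which read the
corresponding bit fields of preempt_count: one of them is non-zero in
hardirq context and in softirq/BH-disabled context respectively, i.e.
exactly the contexts from which a raised softirq is guaranteed to be
processed on irq_exit() or local_bh_enable(). A sketch of where the
assert passes and where it would splat (the function name is made up
for illustration):

	static void example(void)
	{
		lockdep_assert_softirq_will_run();	/* splats: plain process context */

		local_bh_disable();
		lockdep_assert_softirq_will_run();	/* passes: softirq_count() != 0 */
		local_bh_enable();			/* pending softirqs run here */
	}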
{
	struct task_struct *thread;

+	lockdep_assert_softirq_will_run();
+	lockdep_assert_irqs_disabled();
+
	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
		/* Paired with smp_mb__before_atomic() in
		 * napi_enable()/dev_set_threaded().
{
	int ret;

-	lockdep_assert_once(hardirq_count() | softirq_count());
+	lockdep_assert_softirq_will_run();

	trace_netif_rx_entry(skb);

	ret = netif_rx_internal(skb);
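netif_rx() has the same contract and already carried the open-coded
check that the new macro replaces. A hypothetical driver sketch of a
context that satisfies the assert (mydrv_isr() and mydrv_build_skb()
are made-up names):

	static irqreturn_t mydrv_isr(int irq, void *dev_id)
	{
		/* hardirq context: hardirq_count() != 0, so the assert
		 * in netif_rx() passes and the NET_RX_SOFTIRQ raised on
		 * its behalf is processed on irq_exit().
		 */
		struct sk_buff *skb = mydrv_build_skb(dev_id);	/* made-up helper */

		netif_rx(skb);
		return IRQ_HANDLED;
	}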