/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 * Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
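
/*
 * Illustrative sketch (not part of the original file): the "serialize
 * itself" rule above in practice - a softirq action that may run
 * concurrently on several CPUs and protects its shared state with its
 * own spinlock. my_lock and my_softirq_action are hypothetical names.
 */
#if 0
static DEFINE_SPINLOCK(my_lock);

static void my_softirq_action(struct softirq_action *h)
{
	/* Hardirqs are enabled here; another CPU may be in this handler too. */
	spin_lock(&my_lock);
	/* ... touch shared state ... */
	spin_unlock(&my_lock);
}
#endif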

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
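
/*
 * Illustrative sketch (not part of the original file): what the two offsets
 * buy us. in_serving_softirq() tests SOFTIRQ_OFFSET (a handler is running),
 * while in_softirq() tests the whole softirq mask and is therefore also true
 * when bottom halves are merely disabled. my_check_context is a hypothetical
 * helper.
 */
#if 0
static void my_check_context(void)
{
	if (in_serving_softirq())
		pr_info("inside a softirq handler\n");
	else if (in_softirq())
		pr_info("bh disabled via local_bh_disable(), no handler running\n");
}
#endif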

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
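
/*
 * Illustrative sketch (not part of the original file): the canonical use of
 * local_bh_disable()/local_bh_enable() - keeping this CPU's softirq and
 * tasklet handlers away from some state while process context touches it.
 * my_stats and my_update_stats are hypothetical names.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, my_stats);

static void my_update_stats(void)	/* process context */
{
	local_bh_disable();		/* no softirqs run on this CPU now */
	__get_cpu_var(my_stats)++;	/* safe vs. local softirq handlers */
	local_bh_enable();		/* may run pending softirqs here */
}
#endif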

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();
	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();
			kstat_incr_softirqs_this_cpu(h - softirq_vec);

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();
	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	rcu_irq_exit();
#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
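
/*
 * Illustrative sketch (not part of the original file): how a subsystem wires
 * up a softirq vector. The vector must be one of the fixed entries of the
 * softirq enum in <linux/interrupt.h>; MY_SOFTIRQ and my_softirq_handler are
 * hypothetical and would require adding such an entry.
 */
#if 0
static void my_softirq_handler(struct softirq_action *h)
{
	/* Runs in softirq context with hardirqs enabled. */
}

static int __init my_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_handler);	/* register the action */
	raise_softirq(MY_SOFTIRQ);			/* mark it pending on this CPU */
	return 0;
}
#endif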

struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
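
/*
 * Illustrative sketch (not part of the original file): the usual driver-side
 * tasklet pattern - schedule from the hard irq handler, do the heavy work in
 * the tasklet, kill it on teardown. All names below are hypothetical.
 */
#if 0
static void my_tasklet_fn(unsigned long data)
{
	/* Deferred work; runs in softirq context on the CPU that scheduled it. */
}
static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);	/* no-op if it is already pending */
	return IRQ_HANDLED;
}

/* On teardown: tasklet_kill(&my_tasklet); */
#endif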

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
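
/*
 * Illustrative sketch (not part of the original file): arming a
 * tasklet_hrtimer so the callback runs from softirq rather than hard irq
 * context. my_th, my_timer_fn and my_arm are hypothetical.
 */
#if 0
static struct tasklet_hrtimer my_th;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* Called via HI_SOFTIRQ, i.e. in softirq context. */
	return HRTIMER_NORESTART;
}

static void my_arm(void)
{
	tasklet_hrtimer_init(&my_th, my_timer_fn, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_th, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}
#endif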

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
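
/*
 * Illustrative sketch (not part of the original file): a caller pushes a work
 * item to a chosen CPU; the softirq handler registered for that vector drains
 * its per-CPU softirq_work_list[] entry. struct my_item, MY_SOFTIRQ and
 * my_softirq_action are hypothetical.
 */
#if 0
struct my_item {
	struct call_single_data csd;	/* must stay live until processed */
	/* ... payload ... */
};

static void my_queue_on(struct my_item *item, int cpu)
{
	send_remote_softirq(&item->csd, cpu, MY_SOFTIRQ);
}

static void my_softirq_action(struct softirq_action *h)
{
	LIST_HEAD(local);

	local_irq_disable();
	list_splice_init(&__get_cpu_var(softirq_work_list[MY_SOFTIRQ]), &local);
	local_irq_enable();
	/* Walk 'local'; container_of() each csd entry back to struct my_item. */
}
#endif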

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	current->flags |= PF_KSOFTIRQD;
	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_note_context_switch((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return notifier_from_errno(PTR_ERR(p));
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err != NOTIFY_OK);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
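
/*
 * Illustrative sketch (not part of the original file): on_each_cpu() runs a
 * function on every online CPU - by IPI on the others, directly (with irqs
 * off) on the calling CPU - and with wait != 0 returns only once all of them
 * have finished. my_flush and my_flush_all are hypothetical.
 */
#if 0
static void my_flush(void *info)
{
	/* Runs once per CPU, in an atomic (interrupt-like) context. */
}

static void my_flush_all(void)
{
	on_each_cpu(my_flush, NULL, 1);		/* wait for all CPUs to finish */
}
#endif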

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}