/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blk-iopoll.h>
#include <linux/delay.h>

#include "blk.h"

int blk_iopoll_enabled = 1;
EXPORT_SYMBOL(blk_iopoll_enabled);

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * blk_iopoll_sched - Schedule a run of the iopoll handler
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Add this blk_iopoll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq. The driver must already have gotten a
 *     successful return from blk_iopoll_sched_prep() before calling this.
 **/
void blk_iopoll_sched(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_sched);
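
/*
 * Usage sketch (hypothetical driver code, not part of this file): an
 * interrupt handler first claims the instance with blk_iopoll_sched_prep(),
 * assuming the usual convention that a zero return means the caller won
 * IOPOLL_F_SCHED and the instance is not disabled, then defers completion
 * processing to the softirq. struct my_hba, my_isr() and
 * my_disable_completion_irq() are illustrative names only:
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_hba *hba = data;
 *
 *		if (blk_iopoll_sched_prep(&hba->iopoll) == 0) {
 *			my_disable_completion_irq(hba);
 *			blk_iopoll_sched(&hba->iopoll);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */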
41 | ||
42 | /** | |
43 | * __blk_iopoll_complete - Mark this @iop as un-polled again | |
44 | * @iop: The parent iopoll structure | |
45 | * | |
46 | * Description: | |
1badcfbd JA |
47 | * See blk_iopoll_complete(). This function must be called with interrupts |
48 | * disabled. | |
5e605b64 JA |
49 | **/ |
50 | void __blk_iopoll_complete(struct blk_iopoll *iop) | |
51 | { | |
52 | list_del(&iop->list); | |
53 | smp_mb__before_clear_bit(); | |
54 | clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); | |
55 | } | |
56 | EXPORT_SYMBOL(__blk_iopoll_complete); | |
57 | ||
58 | /** | |
59 | * blk_iopoll_complete - Mark this @iop as un-polled again | |
60 | * @iop: The parent iopoll structure | |
61 | * | |
62 | * Description: | |
1badcfbd JA |
63 | * If a driver consumes less than the assigned budget in its run of the |
64 | * iopoll handler, it'll end the polled mode by calling this function. The | |
65 | * iopoll handler will not be invoked again before blk_iopoll_sched_prep() | |
66 | * is called. | |
5e605b64 JA |
67 | **/ |
void blk_iopoll_complete(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	__blk_iopoll_complete(iop);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);
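
/*
 * Poll-handler sketch (hypothetical, illustrative only): a driver's
 * blk_iopoll_fn reaps up to the given budget of completions and, if it
 * drains the queue early, leaves polled mode via blk_iopoll_complete()
 * and re-enables its completion interrupt. my_hba,
 * my_process_completions() and my_enable_completion_irq() are assumed
 * names:
 *
 *	static int my_poll(struct blk_iopoll *iop, int budget)
 *	{
 *		struct my_hba *hba = container_of(iop, struct my_hba,
 *						  iopoll);
 *		int done = my_process_completions(hba, budget);
 *
 *		if (done < budget) {
 *			blk_iopoll_complete(iop);
 *			my_enable_completion_irq(hba);
 *		}
 *		return done;
 *	}
 */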
77 | ||
78 | static void blk_iopoll_softirq(struct softirq_action *h) | |
79 | { | |
80 | struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); | |
81 | unsigned long start_time = jiffies; | |
82 | int rearm = 0, budget = 64; | |
83 | ||
84 | local_irq_disable(); | |
85 | ||
86 | while (!list_empty(list)) { | |
87 | struct blk_iopoll *iop; | |
88 | int work, weight; | |
89 | ||
90 | /* | |
91 | * If softirq window is exhausted then punt. | |
92 | */ | |
93 | if (budget <= 0 || time_after(jiffies, start_time)) { | |
94 | rearm = 1; | |
95 | break; | |
96 | } | |
97 | ||
98 | local_irq_enable(); | |
99 | ||
100 | /* Even though interrupts have been re-enabled, this | |
101 | * access is safe because interrupts can only add new | |
102 | * entries to the tail of this list, and only ->poll() | |
103 | * calls can remove this head entry from the list. | |
104 | */ | |
105 | iop = list_entry(list->next, struct blk_iopoll, list); | |
106 | ||
107 | weight = iop->weight; | |
108 | work = 0; | |
109 | if (test_bit(IOPOLL_F_SCHED, &iop->state)) | |
110 | work = iop->poll(iop, weight); | |
111 | ||
112 | budget -= work; | |
113 | ||
114 | local_irq_disable(); | |
115 | ||
116 | /* Drivers must not modify the NAPI state if they | |
117 | * consume the entire weight. In such cases this code | |
118 | * still "owns" the NAPI instance and therefore can | |
119 | * move the instance around on the list at-will. | |
120 | */ | |
		if (work >= weight) {
			if (blk_iopoll_disable_pending(iop))
				__blk_iopoll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_enable();
}
134 | ||
135 | /** | |
136 | * blk_iopoll_disable - Disable iopoll on this @iop | |
137 | * @iop: The parent iopoll structure | |
138 | * | |
139 | * Description: | |
140 | * Disable io polling and wait for any pending callbacks to have completed. | |
141 | **/ | |
142 | void blk_iopoll_disable(struct blk_iopoll *iop) | |
143 | { | |
144 | set_bit(IOPOLL_F_DISABLE, &iop->state); | |
145 | while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state)) | |
146 | msleep(1); | |
147 | clear_bit(IOPOLL_F_DISABLE, &iop->state); | |
148 | } | |
149 | EXPORT_SYMBOL(blk_iopoll_disable); | |
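
/*
 * Teardown sketch (hypothetical, illustrative only): blk_iopoll_disable()
 * blocks until any in-flight poll run has finished and further scheduling
 * is locked out, so a driver would typically call it before releasing its
 * interrupt. my_hba and hba->irq are assumed names:
 *
 *	blk_iopoll_disable(&hba->iopoll);
 *	free_irq(hba->irq, hba);
 */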
150 | ||
151 | /** | |
152 | * blk_iopoll_enable - Enable iopoll on this @iop | |
153 | * @iop: The parent iopoll structure | |
154 | * | |
155 | * Description: | |
1badcfbd JA |
156 | * Enable iopoll on this @iop. Note that the handler run will not be |
157 | * scheduled, it will only mark it as active. | |
5e605b64 JA |
158 | **/ |
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);
166 | ||
167 | /** | |
168 | * blk_iopoll_init - Initialize this @iop | |
169 | * @iop: The parent iopoll structure | |
170 | * @weight: The default weight (or command completion budget) | |
171 | * @poll_fn: The handler to invoke | |
172 | * | |
173 | * Description: | |
1badcfbd JA |
174 | * Initialize this blk_iopoll structure. Before being actively used, the |
175 | * driver must call blk_iopoll_enable(). | |
5e605b64 JA |
176 | **/ |
177 | void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn) | |
178 | { | |
179 | memset(iop, 0, sizeof(*iop)); | |
180 | INIT_LIST_HEAD(&iop->list); | |
181 | iop->weight = weight; | |
182 | iop->poll = poll_fn; | |
183 | set_bit(IOPOLL_F_SCHED, &iop->state); | |
184 | } | |
185 | EXPORT_SYMBOL(blk_iopoll_init); | |
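
/*
 * Setup sketch (hypothetical, illustrative only): blk_iopoll_init() leaves
 * IOPOLL_F_SCHED set, so the instance cannot be scheduled until the driver
 * calls blk_iopoll_enable(). my_hba, MY_POLL_WEIGHT and my_poll() are
 * assumed names:
 *
 *	blk_iopoll_init(&hba->iopoll, MY_POLL_WEIGHT, my_poll);
 *	blk_iopoll_enable(&hba->iopoll);
 */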
186 | ||
187 | static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self, | |
188 | unsigned long action, void *hcpu) | |
189 | { | |
190 | /* | |
191 | * If a CPU goes away, splice its entries to the current CPU | |
192 | * and trigger a run of the softirq | |
193 | */ | |
194 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | |
195 | int cpu = (unsigned long) hcpu; | |
196 | ||
197 | local_irq_disable(); | |
198 | list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), | |
199 | &__get_cpu_var(blk_cpu_iopoll)); | |
200 | raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); | |
201 | local_irq_enable(); | |
202 | } | |
203 | ||
204 | return NOTIFY_OK; | |
205 | } | |
206 | ||
207 | static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = { | |
208 | .notifier_call = blk_iopoll_cpu_notify, | |
209 | }; | |
210 | ||
211 | static __init int blk_iopoll_setup(void) | |
212 | { | |
213 | int i; | |
214 | ||
215 | for_each_possible_cpu(i) | |
216 | INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); | |
217 | ||
218 | open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq); | |
219 | register_hotcpu_notifier(&blk_iopoll_cpu_notifier); | |
220 | return 0; | |
221 | } | |
222 | subsys_initcall(blk_iopoll_setup); |