/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a too-low value reduces the memory used by each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set lower, which may degrade performance on some I/O-intensive
 * workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
		 "Maximum number of free pages to keep in each block backend buffer");

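/*
 * Note: the 0644 permissions make this knob (and max_persistent_grants
 * below) runtime writable through sysfs, e.g. (assuming the module is
 * loaded under the name xen_blkback):
 *
 *   echo 512 > /sys/module/xen_blkback/parameters/max_buffer_pages
 */
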
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

/*
 * Maximum number of rings/queues blkback supports; allow as many queues as
 * there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of hardware queues per virtual disk. "
		 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

/*
 * The LRU mechanism that cleans the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed on each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

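/*
 * Pop one page from the ring's pool of unused grant pages, falling back to
 * gnttab_alloc_pages() when the pool is empty.  The pool is also touched
 * from the gnttab unmap completion path (see put_free_pages below), hence
 * the irqsave locking on free_pages_lock.
 */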
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	if (list_empty(&ring->free_pages)) {
		BUG_ON(ring->free_pages_num != 0);
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	BUG_ON(ring->free_pages_num == 0);
	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	ring->free_pages_num--;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);

	return 0;
}

static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		BUG_ON(list_empty(&ring->free_pages));
		page[num_pages] = list_first_entry(&ring->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		ring->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&ring->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st);

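/*
 * Safe rbtree walk: the successor is fetched *before* the loop body runs,
 * so the body may rb_erase() and free the current node, in the same spirit
 * as list_for_each_entry_safe() for lists.
 */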
#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;
	struct xen_blkif *blkif = ring->blkif;

	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &ring->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
	ring->persistent_gnt_c++;
	atomic_inc(&ring->persistent_gnt_in_use);
	return 0;
}

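/*
 * Look up gref in the ring's tree of persistently mapped grants.  On
 * success the grant is flagged ACTIVE, so a frontend that hands out the
 * same gref twice within one request is refused the second mapping.
 */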
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = ring->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&ring->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited("freeing a grant already unused\n");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&ring->persistent_gnt_in_use);
}

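/*
 * Unmap and free every grant in the tree, issuing the unmaps in batches of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (the size of the on-stack arrays).
 */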
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    !rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&ring->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(ring, pages, segs_to_unmap);
	}
}

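/*
 * LRU-style purge: when the tree is (nearly) full, detach up to
 * LRU_PERCENT_CLEAN percent of the grants that are neither in use nor used
 * since the last scan, put them on persistent_purge_list and let
 * xen_blkbk_unmap_purged_grants() above do the actual unmapping.
 */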
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false, clean_used = false;
	struct rb_root *root;

	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !ring->blkif->vbd.overflow_max_grants)) {
		goto out;
	}

	if (work_busy(&ring->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		goto out;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(ring->persistent_gnt_c, num_clean);
	if ((num_clean == 0) ||
	    (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
		goto out;

	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug("Going to purge %u persistent grants\n", num_clean);

	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (clean_used) {
			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
			continue;
		}

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since the last purge in order to cope
	 * with the requested number.
	 */
	if (!scan_used && !clean_used) {
		pr_debug("Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	if (!clean_used) {
		pr_debug("Finished scanning for grants to clean, removing used flag\n");
		clean_used = true;
		goto purge_list;
	}

	ring->persistent_gnt_c -= (total - num_clean);
	ring->blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	schedule_work(&ring->persistent_purge_work);
	pr_debug("Purged %u/%u\n", (total - num_clean), total);

out:
	return;
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list,
 * to be used for a new request.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	if (!list_empty(&ring->pending_free)) {
		req = list_entry(ring->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	was_empty = list_empty(&ring->pending_free);
	list_add(&req->free_list, &ring->pending_free);
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	if (was_empty)
		wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != REQ_OP_READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
	ring->waiting_reqs = 1;
	wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, ring->st_oo_req,
		 ring->st_rd_req, ring->st_wr_req,
		 ring->st_f_req, ring->st_ds_req,
		 ring->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	ring->st_rd_req = 0;
	ring->st_wr_req = 0;
	ring->st_oo_req = 0;
	ring->st_ds_req = 0;
}

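/*
 * Main loop of the per-ring kernel thread: wait for requests or a free
 * pending_req, consume the ring via do_block_io_op(), and periodically run
 * the persistent-grant purge and shrink the free page pool back to
 * xen_blkif_max_buffer_pages.
 */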
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif_ring *ring = arg;
	struct xen_blkif *blkif = ring->blkif;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	set_freezable();
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			ring->wq,
			ring->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			ring->pending_free_wq,
			!list_empty(&ring->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(ring);
		if (ret > 0)
			ring->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(ring->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, ring->next_lru)) {
			purge_persistent_gnt(ring);
			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, ring->st_print))
			print_stats(ring);
	}

	/* Drain pending purge work */
	flush_work(&ring->persistent_purge_work);

	if (log_stats)
		print_stats(ring);

	ring->xenblkd = NULL;

	return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
		free_persistent_gnts(ring, &ring->persistent_gnts,
			ring->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
	ring->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(ring, 0 /* All */);
}

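/*
 * Build the unmap operations for a batch of grant pages.  Persistently
 * mapped grants are merely released (put_persistent_gnt) and stay mapped;
 * only the remaining pages are queued for a real unmap.
 */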
static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif_ring *ring,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(ring, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif_ring *ring = pending_req->ring;
	struct xen_blkif *blkif = ring->blkif;

	/* BUG_ON used to reproduce existing behaviour,
	   but is this the best way to deal with this? */
	BUG_ON(result);

	put_free_pages(ring, data->pages, data->count);
	make_response(ring, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(ring, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on)
	 * is not a problem with our current implementation, because
	 * we can be sure there's no thread waiting on pending_free_wq
	 * if there's a drain going on; but it has to be taken into
	 * account if the current model is changed.
	 */
	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif_ring *ring = req->ring;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(ring, unmap_pages, invcount);
		}
		pages += batch;
		num -= batch;
	}
}

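/*
 * Map a batch of grants into this ring, reusing persistent mappings where
 * possible.  At most BLKIF_MAX_SEGMENTS_PER_REQUEST grants are passed to
 * gnttab_map_refs() per iteration; the 'again' loop repeats until all
 * 'num' grants have been handled.
 */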
static int xen_blkbk_map(struct xen_blkif_ring *ring,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;
	struct xen_blkif *blkif = ring->blkif;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set
	 * up map[..] with the PFN of the page in our domain that corresponds
	 * to the grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts) {
			persistent_gnt = get_persistent_gnt(
				ring,
				pages[i]->gref);
		}

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(ring, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(ring, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(ring,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, ring->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert("%s: out of memory\n", __func__);
	put_free_pages(ring, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
			   pending_req->nr_segs,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

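/*
 * For BLKIF_OP_INDIRECT requests the segment descriptors live in extra
 * grant pages.  Map those pages, copy out and sanity-check every segment,
 * then unmap the indirect pages again.
 */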
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif_ring *ring = pending_req->ring;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		uint8_t first_sect, last_sect;

		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(ring, pages, indirect_grefs);
	return rc;
}

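/*
 * Handle a BLKIF_OP_DISCARD request: translate it against the vbd and
 * forward it to the backing device with blkdev_issue_discard(), honouring
 * the secure-erase flag when the device supports it.
 */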
static int dispatch_discard_io(struct xen_blkif_ring *ring,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct xen_blkif *blkif = ring->blkif;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	ring->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(ring, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

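/*
 * Block until every request in flight on this ring has completed (or the
 * thread is told to stop); used to order a WRITE_BARRIER behind all
 * previously submitted I/O.
 */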
static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
	struct xen_blkif *blkif = ring->blkif;

	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&ring->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}
static void __end_block_io_op(struct pending_req *pending_req,
			      blk_status_t error)
{
	/* An error fails the entire request. */
	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
	    error == BLK_STS_NOTSUPP) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
		   error == BLK_STS_NOTSUPP) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_status);
	bio_put(bio);
}

/*
 * Function to copy from the ring buffer the 'struct blkif_request' (which
 * has the sectors we want, number of them, grant references, etc), and
 * transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
		return -EACCES;
	}
	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(ring);
		if (NULL == pending_req) {
			ring->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (ring->blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(ring, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(ring, pending_req);
			if (dispatch_discard_io(ring, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(ring, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif_ring *ring)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(ring);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	int operation_flags = 0;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;

	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		ring->st_rd_req++;
		operation = REQ_OP_READ;
		break;
	case BLKIF_OP_WRITE:
		ring->st_wr_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_SYNC | REQ_IDLE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		ring->st_f_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_PREFLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->ring      = ring;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_segs   = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == REQ_OP_READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 ring->blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 ring->blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/O and, once that has completed, issue
	 * the flush.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->ring);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(ring->blkif);
	atomic_inc(&ring->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector  = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation_flags != REQ_PREFLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio_set_dev(bio, preq.bdev);
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == REQ_OP_READ)
		ring->st_rd_sect += preq.nr_sects;
	else if (operation == REQ_OP_WRITE)
		ring->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(ring, pending_req->segments,
			pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(ring, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response *resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings;
	int notify;

	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}

	resp->id        = id;
	resp->operation = op;
	resp->status    = st;

	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(ring->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

4d05a28d KRW |
1499 | |
1500 | MODULE_LICENSE("Dual BSD/GPL"); | |
a7e9357f | 1501 | MODULE_ALIAS("xen-backend:vbd"); |