/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using a LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

/*
 * How long a persistent grant is allowed to remain allocated without being in
 * use. The time is in seconds, 0 means indefinitely long.
 */

static unsigned int xen_blkif_pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
		   uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
		 "Time in seconds an unused persistent grant is allowed to "
		 "remain allocated. Default is 60, 0 means unlimited.");

/*
 * Maximum number of rings/queues blkback supports, allowing as many queues
 * as there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of hardware queues per virtual disk." \
		 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend, 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percent number of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

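/*
 * Check whether an unused persistent grant has been idle for longer than
 * xen_blkif_pgrant_timeout seconds (0 disables the timeout).
 */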
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
	return xen_blkif_pgrant_timeout &&
	       (jiffies - persistent_gnt->last_used >=
		HZ * xen_blkif_pgrant_timeout);
}

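/*
 * Take one page from the ring's free page pool, or allocate a fresh
 * granted page if the pool is empty.
 */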
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	if (list_empty(&ring->free_pages)) {
		BUG_ON(ring->free_pages_num != 0);
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	BUG_ON(ring->free_pages_num == 0);
	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	ring->free_pages_num--;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);

	return 0;
}

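/* Return @num pages to the ring's free page pool. */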
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

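/* Free pages from the pool until at most @num pages remain buffered. */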
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		BUG_ON(list_empty(&ring->free_pages));
		page[num_pages] = list_first_entry(&ring->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		ring->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&ring->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)


/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;
	struct xen_blkif *blkif = ring->blkif;

	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put the new node */
	new = &ring->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	persistent_gnt->active = true;
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
	ring->persistent_gnt_c++;
	atomic_inc(&ring->persistent_gnt_in_use);
	return 0;
}

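/*
 * Look up a persistently mapped grant by its grant reference and mark it
 * as in use; returns NULL if the grant is unknown or already active.
 */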
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = ring->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (data->active) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			data->active = true;
			atomic_inc(&ring->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

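/* Mark a persistent grant as unused again and record when it was released. */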
static void put_persistent_gnt(struct xen_blkif_ring *ring,
			       struct persistent_gnt *persistent_gnt)
{
	if (!persistent_gnt->active)
		pr_alert_ratelimited("freeing a grant already unused\n");
	persistent_gnt->last_used = jiffies;
	persistent_gnt->active = false;
	atomic_dec(&ring->persistent_gnt_in_use);
}

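/*
 * Unmap and free all persistent grants in @root; @num must match the number
 * of entries in the tree.
 */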
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

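/*
 * Work item that unmaps and frees all grants previously moved to the ring's
 * persistent_purge_list by purge_persistent_gnt().
 */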
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&ring->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(ring, pages, segs_to_unmap);
	}
}

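/*
 * LRU purge of the persistent grant tree: move unused (and, if needed,
 * previously used) grants to persistent_purge_list and schedule the
 * deferred unmap work.
 */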
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (work_busy(&ring->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		goto out;
	}

	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !ring->blkif->vbd.overflow_max_grants)) {
		num_clean = 0;
	} else {
		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
			    num_clean;
		num_clean = min(ring->persistent_gnt_c, num_clean);
		pr_debug("Going to purge at least %u persistent grants\n",
			 num_clean);
	}

	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_grant (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = 0;

	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (persistent_gnt->active)
			continue;
		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
			continue;
		if (scan_used && total >= num_clean)
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
		total++;
	}
	/*
	 * Check whether we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used && total < num_clean) {
		pr_debug("Still missing %u purged frames\n", num_clean - total);
		scan_used = true;
		goto purge_list;
	}

	if (total) {
		ring->persistent_gnt_c -= total;
		ring->blkif->vbd.overflow_max_grants = 0;

		/* We can defer this work */
		schedule_work(&ring->persistent_purge_work);
		pr_debug("Purged %u/%u\n", num_clean, total);
	}

out:
	return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	if (!list_empty(&ring->pending_free)) {
		req = list_entry(ring->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	was_empty = list_empty(&ring->pending_free);
	list_add(&req->free_list, &ring->pending_free);
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	if (was_empty)
		wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != REQ_OP_READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
	ring->waiting_reqs = 1;
	wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, ring->st_oo_req,
		 ring->st_rd_req, ring->st_wr_req,
		 ring->st_f_req, ring->st_ds_req,
		 ring->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	ring->st_rd_req = 0;
	ring->st_wr_req = 0;
	ring->st_oo_req = 0;
	ring->st_ds_req = 0;
}

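/*
 * Main loop of the per-ring kernel thread: waits for requests from the
 * frontend, dispatches them, and periodically purges persistent grants
 * and shrinks the free page pool.
 */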
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif_ring *ring = arg;
	struct xen_blkif *blkif = ring->blkif;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	set_freezable();
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			ring->wq,
			ring->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			ring->pending_free_wq,
			!list_empty(&ring->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(ring);
		if (ret > 0)
			ring->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(ring->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, ring->next_lru)) {
			purge_persistent_gnt(ring);
			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, ring->st_print))
			print_stats(ring);
	}

	/* Drain pending purge work */
	flush_work(&ring->persistent_purge_work);

	if (log_stats)
		print_stats(ring);

	ring->xenblkd = NULL;

	return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
		free_persistent_gnts(ring, &ring->persistent_gnts,
			ring->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
	ring->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(ring, 0 /* All */);
}

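/*
 * Build the unmap operations for a batch of grant pages: persistent grants
 * are simply released, everything else is queued for a real unmap.
 * Returns the number of unmap ops prepared.
 */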
static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif_ring *ring,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(ring, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif_ring *ring = pending_req->ring;
	struct xen_blkif *blkif = ring->blkif;

	/* BUG_ON used to reproduce existing behaviour,
	   but is this the best way to deal with this? */
	BUG_ON(result);

	put_free_pages(ring, data->pages, data->count);
	make_response(ring, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(ring, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on)
	 * is not a problem with our current implementation
	 * because we can assure there's no thread waiting on
	 * pending_free_wq if there's a drain going on, but it has
	 * to be taken into account if the current model is changed.
	 */
	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

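/*
 * Asynchronously unmap the grants of a completed request; the response is
 * sent from the unmap callback once the hypercall has finished.
 */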
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif_ring *ring = req->ring;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}


/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(ring, unmap_pages, invcount);
		}
		pages += batch;
		num -= batch;
	}
}

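/*
 * Map the frontend's grant references for a request, reusing persistent
 * grants where possible and caching newly mapped grants as persistent when
 * there is still room below xen_blkif_max_pgrants.
 */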
static int xen_blkbk_map(struct xen_blkif_ring *ring,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;
	struct xen_blkif *blkif = ring->blkif;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper amount of sectors, and set up
	 * map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts) {
			persistent_gnt = get_persistent_gnt(
				ring,
				pages[i]->gref);
		}

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(ring, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(ring, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(ring,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, ring->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert("%s: out of memory\n", __func__);
	put_free_pages(ring, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
			   pending_req->nr_segs,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

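/*
 * For indirect requests: map the indirect descriptor pages, copy out the
 * per-segment grant references and sector ranges, and validate them.
 */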
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif_ring *ring = pending_req->ring;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		uint8_t first_sect, last_sect;

		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(ring, pages, indirect_grefs);
	return rc;
}

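/*
 * Handle a BLKIF_OP_DISCARD request by translating it into a
 * blkdev_issue_discard() call on the backing device.
 */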
static int dispatch_discard_io(struct xen_blkif_ring *ring,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct xen_blkif *blkif = ring->blkif;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	ring->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(ring, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

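/*
 * Wait until all requests in flight on this ring have completed; used
 * before issuing a write barrier.
 */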
static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
	struct xen_blkif *blkif = ring->blkif;

	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&ring->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

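/*
 * Completion handler shared by all bios of a request: record errors, and
 * once the last bio finishes, unmap the grants and send the response.
 */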
static void __end_block_io_op(struct pending_req *pending_req,
		blk_status_t error)
{
	/* An error fails the entire request. */
	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
	    error == BLK_STS_NOTSUPP) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
		   error == BLK_STS_NOTSUPP) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_status);
	bio_put(bio);
}


/*
 * Function to copy a 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
		return -EACCES;
	}
	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(ring);
		if (NULL == pending_req) {
			ring->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (ring->blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(ring, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(ring, pending_req);
			if (dispatch_discard_io(ring, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(ring, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif_ring *ring)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(ring);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	int operation_flags = 0;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;

	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		ring->st_rd_req++;
		operation = REQ_OP_READ;
		break;
	case BLKIF_OP_WRITE:
		ring->st_wr_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_SYNC | REQ_IDLE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		ring->st_f_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_PREFLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects = 0;

	pending_req->ring = ring;
	pending_req->id = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_segs = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev = req->u.rw.handle;
		preq.sector_number = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev = req->u.indirect.handle;
		preq.sector_number = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == REQ_OP_READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 ring->blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 ring->blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the flush.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->ring);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(ring->blkif);
	atomic_inc(&ring->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_iter.bi_sector = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation_flags != REQ_PREFLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio_set_dev(bio, preq.bdev);
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == REQ_OP_READ)
		ring->st_rd_sect += preq.nr_sects;
	else if (operation == REQ_OP_WRITE)
		ring->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(ring, pending_req->segments,
			pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(ring, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
	msleep(1); /* back off a bit */
	return -EIO;
}


/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response *resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings;
	int notify;

	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}

	resp->id = id;
	resp->operation = op;
	resp->status = st;

	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(ring->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");