block: remove QUEUE_FLAG_DISCARD
drivers/block/xen-blkfront.c (linux-block.git)
1 /*
2  * blkfront.c
3  *
4  * XenLinux virtual block device driver.
5  *
6  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
7  * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
8  * Copyright (c) 2004, Christian Limpach
9  * Copyright (c) 2004, Andrew Warfield
10  * Copyright (c) 2005, Christopher Clark
11  * Copyright (c) 2005, XenSource Ltd
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License version 2
15  * as published by the Free Software Foundation; or, when distributed
16  * separately from the Linux kernel or incorporated into other
17  * software packages, subject to the following license:
18  *
19  * Permission is hereby granted, free of charge, to any person obtaining a copy
20  * of this source file (the "Software"), to deal in the Software without
21  * restriction, including without limitation the rights to use, copy, modify,
22  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
23  * and to permit persons to whom the Software is furnished to do so, subject to
24  * the following conditions:
25  *
26  * The above copyright notice and this permission notice shall be included in
27  * all copies or substantial portions of the Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
35  * IN THE SOFTWARE.
36  */
37
38 #include <linux/interrupt.h>
39 #include <linux/blkdev.h>
40 #include <linux/blk-mq.h>
41 #include <linux/hdreg.h>
42 #include <linux/cdrom.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/major.h>
46 #include <linux/mutex.h>
47 #include <linux/scatterlist.h>
48 #include <linux/bitmap.h>
49 #include <linux/list.h>
50 #include <linux/workqueue.h>
51 #include <linux/sched/mm.h>
52
53 #include <xen/xen.h>
54 #include <xen/xenbus.h>
55 #include <xen/grant_table.h>
56 #include <xen/events.h>
57 #include <xen/page.h>
58 #include <xen/platform_pci.h>
59
60 #include <xen/interface/grant_table.h>
61 #include <xen/interface/io/blkif.h>
62 #include <xen/interface/io/protocols.h>
63
64 #include <asm/xen/hypervisor.h>
65
66 /*
 67  * The minimal segment size supported by the block framework is PAGE_SIZE.
68  * When Linux is using a different page size than Xen, it may not be possible
69  * to put all the data in a single segment.
 70  * This can happen when the backend doesn't support indirect descriptors and
71  * therefore the maximum amount of data that a request can carry is
72  * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
73  *
74  * Note that we only support one extra request. So the Linux page size
75  * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
76  * 88KB.
77  */
78 #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
79
80 enum blkif_state {
81         BLKIF_STATE_DISCONNECTED,
82         BLKIF_STATE_CONNECTED,
83         BLKIF_STATE_SUSPENDED,
84         BLKIF_STATE_ERROR,
85 };
86
87 struct grant {
88         grant_ref_t gref;
89         struct page *page;
90         struct list_head node;
91 };
92
93 enum blk_req_status {
94         REQ_PROCESSING,
95         REQ_WAITING,
96         REQ_DONE,
97         REQ_ERROR,
98         REQ_EOPNOTSUPP,
99 };
100
101 struct blk_shadow {
102         struct blkif_request req;
103         struct request *request;
104         struct grant **grants_used;
105         struct grant **indirect_grants;
106         struct scatterlist *sg;
107         unsigned int num_sg;
108         enum blk_req_status status;
109
110         #define NO_ASSOCIATED_ID ~0UL
111         /*
112          * Id of the sibling if we ever need 2 requests when handling a
113          * block I/O request
114          */
115         unsigned long associated_id;
116 };
117
118 struct blkif_req {
119         blk_status_t    error;
120 };
121
122 static inline struct blkif_req *blkif_req(struct request *rq)
123 {
124         return blk_mq_rq_to_pdu(rq);
125 }
126
127 static DEFINE_MUTEX(blkfront_mutex);
128 static const struct block_device_operations xlvbd_block_fops;
129 static struct delayed_work blkfront_work;
130 static LIST_HEAD(info_list);
131
132 /*
 133  * Maximum number of segments in indirect requests; the actual value used by
134  * the frontend driver is the minimum of this value and the value provided
135  * by the backend driver.
136  */
137
138 static unsigned int xen_blkif_max_segments = 32;
139 module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
140 MODULE_PARM_DESC(max_indirect_segments,
141                  "Maximum amount of segments in indirect requests (default is 32)");
142
143 static unsigned int xen_blkif_max_queues = 4;
144 module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
145 MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
146
147 /*
148  * Maximum order of pages to be used for the shared ring between front and
 149  * backend; 4KB page granularity is used.
150  */
151 static unsigned int xen_blkif_max_ring_order;
152 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
153 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
154
155 #define BLK_RING_SIZE(info)     \
156         __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
157
158 /*
 159  * ring-ref%u with %u = (-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 160  * characters are enough. Define it as 20 to stay consistent with the backend.
161  */
162 #define RINGREF_NAME_LEN (20)
163 /*
164  * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
165  */
166 #define QUEUE_NAME_LEN (17)
167
168 /*
169  *  Per-ring info.
 170  *  Every blkfront device can be associated with one or more blkfront_ring_info,
 171  *  depending on how many hardware queues/rings are used.
172  */
173 struct blkfront_ring_info {
174         /* Lock to protect data in every ring buffer. */
175         spinlock_t ring_lock;
176         struct blkif_front_ring ring;
177         unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
178         unsigned int evtchn, irq;
179         struct work_struct work;
180         struct gnttab_free_callback callback;
181         struct list_head indirect_pages;
182         struct list_head grants;
183         unsigned int persistent_gnts_c;
184         unsigned long shadow_free;
185         struct blkfront_info *dev_info;
186         struct blk_shadow shadow[];
187 };
188
189 /*
190  * We have one of these per vbd, whether ide, scsi or 'other'.  They
191  * hang in private_data off the gendisk structure. We may end up
192  * putting all kinds of interesting stuff here :-)
193  */
194 struct blkfront_info
195 {
196         struct mutex mutex;
197         struct xenbus_device *xbdev;
198         struct gendisk *gd;
199         u16 sector_size;
200         unsigned int physical_sector_size;
201         unsigned long vdisk_info;
202         int vdevice;
203         blkif_vdev_t handle;
204         enum blkif_state connected;
205         /* Number of pages per ring buffer. */
206         unsigned int nr_ring_pages;
207         struct request_queue *rq;
208         unsigned int feature_flush:1;
209         unsigned int feature_fua:1;
210         unsigned int feature_discard:1;
211         unsigned int feature_secdiscard:1;
212         unsigned int feature_persistent:1;
213         unsigned int discard_granularity;
214         unsigned int discard_alignment;
215         /* Number of 4KB segments handled */
216         unsigned int max_indirect_segments;
217         int is_ready;
218         struct blk_mq_tag_set tag_set;
219         struct blkfront_ring_info *rinfo;
220         unsigned int nr_rings;
221         unsigned int rinfo_size;
 222         /* Save incomplete reqs and bios for migration. */
223         struct list_head requests;
224         struct bio_list bio_list;
225         struct list_head info_list;
226 };
227
228 static unsigned int nr_minors;
229 static unsigned long *minors;
230 static DEFINE_SPINLOCK(minor_lock);
231
232 #define GRANT_INVALID_REF       0
233
234 #define PARTS_PER_DISK          16
235 #define PARTS_PER_EXT_DISK      256
236
237 #define BLKIF_MAJOR(dev) ((dev)>>8)
238 #define BLKIF_MINOR(dev) ((dev) & 0xff)
239
240 #define EXT_SHIFT 28
241 #define EXTENDED (1<<EXT_SHIFT)
242 #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
243 #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
244 #define EMULATED_HD_DISK_MINOR_OFFSET (0)
245 #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
246 #define EMULATED_SD_DISK_MINOR_OFFSET (0)
247 #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
248
249 #define DEV_NAME        "xvd"   /* name in /dev */
250
251 /*
 252  * Grants are always the same size as a Xen page (i.e. 4KB).
253  * A physical segment is always the same size as a Linux page.
254  * Number of grants per physical segment
255  */
256 #define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
257
258 #define GRANTS_PER_INDIRECT_FRAME \
259         (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
260
261 #define INDIRECT_GREFS(_grants)         \
262         DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
263
264 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
265 static void blkfront_gather_backend_features(struct blkfront_info *info);
266 static int negotiate_mq(struct blkfront_info *info);
267
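/*
 * Ring structures are laid out back to back in info->rinfo. Since struct
 * blkfront_ring_info ends in a flexible shadow[] array, the per-ring stride
 * is the runtime value info->rinfo_size rather than sizeof(*rinfo).
 */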
268 #define for_each_rinfo(info, ptr, idx)                          \
269         for ((ptr) = (info)->rinfo, (idx) = 0;                  \
270              (idx) < (info)->nr_rings;                          \
271              (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
272
273 static inline struct blkfront_ring_info *
274 get_rinfo(const struct blkfront_info *info, unsigned int i)
275 {
276         BUG_ON(i >= info->nr_rings);
277         return (void *)info->rinfo + i * info->rinfo_size;
278 }
279
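/*
 * Free shadow entries are kept on a list threaded through the entries
 * themselves: each free entry's req.u.rw.id holds the index of the next
 * free entry, and rinfo->shadow_free points at the head of the list.
 */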
280 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
281 {
282         unsigned long free = rinfo->shadow_free;
283
284         BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
285         rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
286         rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
287         return free;
288 }
289
290 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
291                               unsigned long id)
292 {
293         if (rinfo->shadow[id].req.u.rw.id != id)
294                 return -EINVAL;
295         if (rinfo->shadow[id].request == NULL)
296                 return -EINVAL;
297         rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
298         rinfo->shadow[id].request = NULL;
299         rinfo->shadow_free = id;
300         return 0;
301 }
302
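/*
 * Pre-allocate @num grant entries for a ring; when persistent grants are
 * in use, each entry also gets a backing page up front.
 */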
303 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
304 {
305         struct blkfront_info *info = rinfo->dev_info;
306         struct page *granted_page;
307         struct grant *gnt_list_entry, *n;
308         int i = 0;
309
310         while (i < num) {
311                 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
312                 if (!gnt_list_entry)
313                         goto out_of_memory;
314
315                 if (info->feature_persistent) {
316                         granted_page = alloc_page(GFP_NOIO);
317                         if (!granted_page) {
318                                 kfree(gnt_list_entry);
319                                 goto out_of_memory;
320                         }
321                         gnt_list_entry->page = granted_page;
322                 }
323
324                 gnt_list_entry->gref = GRANT_INVALID_REF;
325                 list_add(&gnt_list_entry->node, &rinfo->grants);
326                 i++;
327         }
328
329         return 0;
330
331 out_of_memory:
332         list_for_each_entry_safe(gnt_list_entry, n,
333                                  &rinfo->grants, node) {
334                 list_del(&gnt_list_entry->node);
335                 if (info->feature_persistent)
336                         __free_page(gnt_list_entry->page);
337                 kfree(gnt_list_entry);
338                 i--;
339         }
340         BUG_ON(i != 0);
341         return -ENOMEM;
342 }
343
344 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
345 {
346         struct grant *gnt_list_entry;
347
348         BUG_ON(list_empty(&rinfo->grants));
349         gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
350                                           node);
351         list_del(&gnt_list_entry->node);
352
353         if (gnt_list_entry->gref != GRANT_INVALID_REF)
354                 rinfo->persistent_gnts_c--;
355
356         return gnt_list_entry;
357 }
358
359 static inline void grant_foreign_access(const struct grant *gnt_list_entry,
360                                         const struct blkfront_info *info)
361 {
362         gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
363                                                  info->xbdev->otherend_id,
364                                                  gnt_list_entry->page,
365                                                  0);
366 }
367
368 static struct grant *get_grant(grant_ref_t *gref_head,
369                                unsigned long gfn,
370                                struct blkfront_ring_info *rinfo)
371 {
372         struct grant *gnt_list_entry = get_free_grant(rinfo);
373         struct blkfront_info *info = rinfo->dev_info;
374
375         if (gnt_list_entry->gref != GRANT_INVALID_REF)
376                 return gnt_list_entry;
377
378         /* Assign a gref to this page */
379         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
380         BUG_ON(gnt_list_entry->gref == -ENOSPC);
381         if (info->feature_persistent)
382                 grant_foreign_access(gnt_list_entry, info);
383         else {
384                 /* Grant access to the GFN passed by the caller */
385                 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
386                                                 info->xbdev->otherend_id,
387                                                 gfn, 0);
388         }
389
390         return gnt_list_entry;
391 }
392
393 static struct grant *get_indirect_grant(grant_ref_t *gref_head,
394                                         struct blkfront_ring_info *rinfo)
395 {
396         struct grant *gnt_list_entry = get_free_grant(rinfo);
397         struct blkfront_info *info = rinfo->dev_info;
398
399         if (gnt_list_entry->gref != GRANT_INVALID_REF)
400                 return gnt_list_entry;
401
402         /* Assign a gref to this page */
403         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
404         BUG_ON(gnt_list_entry->gref == -ENOSPC);
405         if (!info->feature_persistent) {
406                 struct page *indirect_page;
407
408                 /* Fetch a pre-allocated page to use for indirect grefs */
409                 BUG_ON(list_empty(&rinfo->indirect_pages));
410                 indirect_page = list_first_entry(&rinfo->indirect_pages,
411                                                  struct page, lru);
412                 list_del(&indirect_page->lru);
413                 gnt_list_entry->page = indirect_page;
414         }
415         grant_foreign_access(gnt_list_entry, info);
416
417         return gnt_list_entry;
418 }
419
420 static const char *op_name(int op)
421 {
422         static const char *const names[] = {
423                 [BLKIF_OP_READ] = "read",
424                 [BLKIF_OP_WRITE] = "write",
425                 [BLKIF_OP_WRITE_BARRIER] = "barrier",
426                 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
427                 [BLKIF_OP_DISCARD] = "discard" };
428
429         if (op < 0 || op >= ARRAY_SIZE(names))
430                 return "unknown";
431
432         if (!names[op])
433                 return "reserved";
434
435         return names[op];
436 }
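/*
 * Reserve a contiguous range of @nr minors starting at @minor, growing the
 * global minors bitmap under minor_lock if the range does not fit yet.
 */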
437 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
438 {
439         unsigned int end = minor + nr;
440         int rc;
441
442         if (end > nr_minors) {
443                 unsigned long *bitmap, *old;
444
445                 bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
446                                  GFP_KERNEL);
447                 if (bitmap == NULL)
448                         return -ENOMEM;
449
450                 spin_lock(&minor_lock);
451                 if (end > nr_minors) {
452                         old = minors;
453                         memcpy(bitmap, minors,
454                                BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
455                         minors = bitmap;
456                         nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
457                 } else
458                         old = bitmap;
459                 spin_unlock(&minor_lock);
460                 kfree(old);
461         }
462
463         spin_lock(&minor_lock);
464         if (find_next_bit(minors, end, minor) >= end) {
465                 bitmap_set(minors, minor, nr);
466                 rc = 0;
467         } else
468                 rc = -EBUSY;
469         spin_unlock(&minor_lock);
470
471         return rc;
472 }
473
474 static void xlbd_release_minors(unsigned int minor, unsigned int nr)
475 {
476         unsigned int end = minor + nr;
477
478         BUG_ON(end > nr_minors);
479         spin_lock(&minor_lock);
480         bitmap_clear(minors,  minor, nr);
481         spin_unlock(&minor_lock);
482 }
483
484 static void blkif_restart_queue_callback(void *arg)
485 {
486         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
487         schedule_work(&rinfo->work);
488 }
489
490 static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
491 {
492         /* We don't have real geometry info, but let's at least return
493            values consistent with the size of the device */
494         sector_t nsect = get_capacity(bd->bd_disk);
495         sector_t cylinders = nsect;
496
497         hg->heads = 0xff;
498         hg->sectors = 0x3f;
499         sector_div(cylinders, hg->heads * hg->sectors);
500         hg->cylinders = cylinders;
501         if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
502                 hg->cylinders = 0xffff;
503         return 0;
504 }
505
506 static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
507                        unsigned command, unsigned long argument)
508 {
509         struct blkfront_info *info = bdev->bd_disk->private_data;
510         int i;
511
512         switch (command) {
513         case CDROMMULTISESSION:
514                 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
515                         if (put_user(0, (char __user *)(argument + i)))
516                                 return -EFAULT;
517                 return 0;
518         case CDROM_GET_CAPABILITY:
519                 if (!(info->vdisk_info & VDISK_CDROM))
520                         return -EINVAL;
521                 return 0;
522         default:
523                 return -EINVAL;
524         }
525 }
526
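/*
 * Reserve the next free slot on the shared ring together with a shadow
 * entry. Note that *ring_req points into the shared ring page; callers
 * assemble the request in rinfo->shadow[id].req first and copy it to the
 * ring slot in a single assignment once it is fully built.
 */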
527 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
528                                             struct request *req,
529                                             struct blkif_request **ring_req)
530 {
531         unsigned long id;
532
533         *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
534         rinfo->ring.req_prod_pvt++;
535
536         id = get_id_from_freelist(rinfo);
537         rinfo->shadow[id].request = req;
538         rinfo->shadow[id].status = REQ_PROCESSING;
539         rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
540
541         rinfo->shadow[id].req.u.rw.id = id;
542
543         return id;
544 }
545
546 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
547 {
548         struct blkfront_info *info = rinfo->dev_info;
549         struct blkif_request *ring_req, *final_ring_req;
550         unsigned long id;
551
552         /* Fill out a communications ring structure. */
553         id = blkif_ring_get_request(rinfo, req, &final_ring_req);
554         ring_req = &rinfo->shadow[id].req;
555
556         ring_req->operation = BLKIF_OP_DISCARD;
557         ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
558         ring_req->u.discard.id = id;
559         ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
560         if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
561                 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
562         else
563                 ring_req->u.discard.flag = 0;
564
565         /* Copy the request to the ring page. */
566         *final_ring_req = *ring_req;
567         rinfo->shadow[id].status = REQ_WAITING;
568
569         return 0;
570 }
571
572 struct setup_rw_req {
573         unsigned int grant_idx;
574         struct blkif_request_segment *segments;
575         struct blkfront_ring_info *rinfo;
576         struct blkif_request *ring_req;
577         grant_ref_t gref_head;
578         unsigned int id;
579         /* Only used when persistent grant is used and it's a write request */
580         bool need_copy;
581         unsigned int bvec_off;
582         char *bvec_data;
583
584         bool require_extra_req;
585         struct blkif_request *extra_ring_req;
586 };
587
588 static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
589                                      unsigned int len, void *data)
590 {
591         struct setup_rw_req *setup = data;
592         int n, ref;
593         struct grant *gnt_list_entry;
594         unsigned int fsect, lsect;
595         /* Convenient aliases */
596         unsigned int grant_idx = setup->grant_idx;
597         struct blkif_request *ring_req = setup->ring_req;
598         struct blkfront_ring_info *rinfo = setup->rinfo;
599         /*
600          * We always use the shadow of the first request to store the list
 601          * of grants associated with the block I/O request. This makes the
 602          * completion easier to handle even if the block I/O request is
603          * split.
604          */
605         struct blk_shadow *shadow = &rinfo->shadow[setup->id];
606
607         if (unlikely(setup->require_extra_req &&
608                      grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
609                 /*
 610                  * We are using the second request, so set up grant_idx
 611                  * to be the index into its segment array.
612                  */
613                 grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
614                 ring_req = setup->extra_ring_req;
615         }
616
617         if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
618             (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
619                 if (setup->segments)
620                         kunmap_atomic(setup->segments);
621
622                 n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
623                 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
624                 shadow->indirect_grants[n] = gnt_list_entry;
625                 setup->segments = kmap_atomic(gnt_list_entry->page);
626                 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
627         }
628
629         gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
630         ref = gnt_list_entry->gref;
631         /*
632          * All the grants are stored in the shadow of the first
633          * request. Therefore we have to use the global index.
634          */
635         shadow->grants_used[setup->grant_idx] = gnt_list_entry;
636
637         if (setup->need_copy) {
638                 void *shared_data;
639
640                 shared_data = kmap_atomic(gnt_list_entry->page);
641                 /*
642                  * this does not wipe data stored outside the
643                  * range sg->offset..sg->offset+sg->length.
644                  * Therefore, blkback *could* see data from
645                  * previous requests. This is OK as long as
646                  * persistent grants are shared with just one
647                  * domain. It may need refactoring if this
648                  * changes
649                  */
650                 memcpy(shared_data + offset,
651                        setup->bvec_data + setup->bvec_off,
652                        len);
653
654                 kunmap_atomic(shared_data);
655                 setup->bvec_off += len;
656         }
657
658         fsect = offset >> 9;
659         lsect = fsect + (len >> 9) - 1;
660         if (ring_req->operation != BLKIF_OP_INDIRECT) {
661                 ring_req->u.rw.seg[grant_idx] =
662                         (struct blkif_request_segment) {
663                                 .gref       = ref,
664                                 .first_sect = fsect,
665                                 .last_sect  = lsect };
666         } else {
667                 setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
668                         (struct blkif_request_segment) {
669                                 .gref       = ref,
670                                 .first_sect = fsect,
671                                 .last_sect  = lsect };
672         }
673
674         (setup->grant_idx)++;
675 }
676
677 static void blkif_setup_extra_req(struct blkif_request *first,
678                                   struct blkif_request *second)
679 {
680         uint16_t nr_segments = first->u.rw.nr_segments;
681
682         /*
683          * The second request is only present when the first request uses
 684          * all its segments. It's always the continuation of the first one.
685          */
686         first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
687
688         second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
689         second->u.rw.sector_number = first->u.rw.sector_number +
690                 (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
691
692         second->u.rw.handle = first->u.rw.handle;
693         second->operation = first->operation;
694 }
695
696 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
697 {
698         struct blkfront_info *info = rinfo->dev_info;
699         struct blkif_request *ring_req, *extra_ring_req = NULL;
700         struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
701         unsigned long id, extra_id = NO_ASSOCIATED_ID;
702         bool require_extra_req = false;
703         int i;
704         struct setup_rw_req setup = {
705                 .grant_idx = 0,
706                 .segments = NULL,
707                 .rinfo = rinfo,
708                 .need_copy = rq_data_dir(req) && info->feature_persistent,
709         };
710
711         /*
712          * Used to store if we are able to queue the request by just using
713          * existing persistent grants, or if we have to get new grants,
714          * as there are not sufficiently many free.
715          */
716         bool new_persistent_gnts = false;
717         struct scatterlist *sg;
718         int num_sg, max_grefs, num_grant;
719
720         max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
721         if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
722                 /*
723                  * If we are using indirect segments we need to account
724                  * for the indirect grefs used in the request.
725                  */
726                 max_grefs += INDIRECT_GREFS(max_grefs);
727
 728         /* Check if we have enough persistent grants to allocate a request */
729         if (rinfo->persistent_gnts_c < max_grefs) {
730                 new_persistent_gnts = true;
731
732                 if (gnttab_alloc_grant_references(
733                     max_grefs - rinfo->persistent_gnts_c,
734                     &setup.gref_head) < 0) {
735                         gnttab_request_free_callback(
736                                 &rinfo->callback,
737                                 blkif_restart_queue_callback,
738                                 rinfo,
739                                 max_grefs - rinfo->persistent_gnts_c);
740                         return 1;
741                 }
742         }
743
744         /* Fill out a communications ring structure. */
745         id = blkif_ring_get_request(rinfo, req, &final_ring_req);
746         ring_req = &rinfo->shadow[id].req;
747
748         num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
749         num_grant = 0;
 750         /* Calculate the number of grants used */
751         for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
752                num_grant += gnttab_count_grant(sg->offset, sg->length);
753
754         require_extra_req = info->max_indirect_segments == 0 &&
755                 num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
756         BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
757
758         rinfo->shadow[id].num_sg = num_sg;
759         if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
760             likely(!require_extra_req)) {
761                 /*
762                  * The indirect operation can only be a BLKIF_OP_READ or
763                  * BLKIF_OP_WRITE
764                  */
765                 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
766                 ring_req->operation = BLKIF_OP_INDIRECT;
767                 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
768                         BLKIF_OP_WRITE : BLKIF_OP_READ;
769                 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
770                 ring_req->u.indirect.handle = info->handle;
771                 ring_req->u.indirect.nr_segments = num_grant;
772         } else {
773                 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
774                 ring_req->u.rw.handle = info->handle;
775                 ring_req->operation = rq_data_dir(req) ?
776                         BLKIF_OP_WRITE : BLKIF_OP_READ;
777                 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
778                         /*
779                          * Ideally we can do an unordered flush-to-disk.
 780                          * In case the backend only supports barriers, use that.
 781                          * A barrier request is a superset of FUA, so we can
782                          * implement it the same way.  (It's also a FLUSH+FUA,
783                          * since it is guaranteed ordered WRT previous writes.)
784                          */
785                         if (info->feature_flush && info->feature_fua)
786                                 ring_req->operation =
787                                         BLKIF_OP_WRITE_BARRIER;
788                         else if (info->feature_flush)
789                                 ring_req->operation =
790                                         BLKIF_OP_FLUSH_DISKCACHE;
791                         else
792                                 ring_req->operation = 0;
793                 }
794                 ring_req->u.rw.nr_segments = num_grant;
795                 if (unlikely(require_extra_req)) {
796                         extra_id = blkif_ring_get_request(rinfo, req,
797                                                           &final_extra_ring_req);
798                         extra_ring_req = &rinfo->shadow[extra_id].req;
799
800                         /*
801                          * Only the first request contains the scatter-gather
802                          * list.
803                          */
804                         rinfo->shadow[extra_id].num_sg = 0;
805
806                         blkif_setup_extra_req(ring_req, extra_ring_req);
807
808                         /* Link the 2 requests together */
809                         rinfo->shadow[extra_id].associated_id = id;
810                         rinfo->shadow[id].associated_id = extra_id;
811                 }
812         }
813
814         setup.ring_req = ring_req;
815         setup.id = id;
816
817         setup.require_extra_req = require_extra_req;
818         if (unlikely(require_extra_req))
819                 setup.extra_ring_req = extra_ring_req;
820
821         for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
822                 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
823
824                 if (setup.need_copy) {
825                         setup.bvec_off = sg->offset;
826                         setup.bvec_data = kmap_atomic(sg_page(sg));
827                 }
828
829                 gnttab_foreach_grant_in_range(sg_page(sg),
830                                               sg->offset,
831                                               sg->length,
832                                               blkif_setup_rw_req_grant,
833                                               &setup);
834
835                 if (setup.need_copy)
836                         kunmap_atomic(setup.bvec_data);
837         }
838         if (setup.segments)
839                 kunmap_atomic(setup.segments);
840
841         /* Copy request(s) to the ring page. */
842         *final_ring_req = *ring_req;
843         rinfo->shadow[id].status = REQ_WAITING;
844         if (unlikely(require_extra_req)) {
845                 *final_extra_ring_req = *extra_ring_req;
846                 rinfo->shadow[extra_id].status = REQ_WAITING;
847         }
848
849         if (new_persistent_gnts)
850                 gnttab_free_grant_references(setup.gref_head);
851
852         return 0;
853 }
854
855 /*
856  * Generate a Xen blkfront IO request from a blk layer request.  Reads
857  * and writes are handled as expected.
858  *
859  * @req: a request struct
860  */
861 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
862 {
863         if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
864                 return 1;
865
866         if (unlikely(req_op(req) == REQ_OP_DISCARD ||
867                      req_op(req) == REQ_OP_SECURE_ERASE))
868                 return blkif_queue_discard_req(req, rinfo);
869         else
870                 return blkif_queue_rw_req(req, rinfo);
871 }
872
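/* Push any newly queued requests to the ring and notify the backend if required. */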
873 static inline void flush_requests(struct blkfront_ring_info *rinfo)
874 {
875         int notify;
876
877         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
878
879         if (notify)
880                 notify_remote_via_irq(rinfo->irq);
881 }
882
883 static inline bool blkif_request_flush_invalid(struct request *req,
884                                                struct blkfront_info *info)
885 {
886         return (blk_rq_is_passthrough(req) ||
887                 ((req_op(req) == REQ_OP_FLUSH) &&
888                  !info->feature_flush) ||
889                 ((req->cmd_flags & REQ_FUA) &&
890                  !info->feature_fua));
891 }
892
893 static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
894                           const struct blk_mq_queue_data *qd)
895 {
896         unsigned long flags;
897         int qid = hctx->queue_num;
898         struct blkfront_info *info = hctx->queue->queuedata;
899         struct blkfront_ring_info *rinfo = NULL;
900
901         rinfo = get_rinfo(info, qid);
902         blk_mq_start_request(qd->rq);
903         spin_lock_irqsave(&rinfo->ring_lock, flags);
904         if (RING_FULL(&rinfo->ring))
905                 goto out_busy;
906
907         if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
908                 goto out_err;
909
910         if (blkif_queue_request(qd->rq, rinfo))
911                 goto out_busy;
912
913         flush_requests(rinfo);
914         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
915         return BLK_STS_OK;
916
917 out_err:
918         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
919         return BLK_STS_IOERR;
920
921 out_busy:
922         blk_mq_stop_hw_queue(hctx);
923         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
924         return BLK_STS_DEV_RESOURCE;
925 }
926
927 static void blkif_complete_rq(struct request *rq)
928 {
929         blk_mq_end_request(rq, blkif_req(rq)->error);
930 }
931
932 static const struct blk_mq_ops blkfront_mq_ops = {
933         .queue_rq = blkif_queue_rq,
934         .complete = blkif_complete_rq,
935 };
936
937 static void blkif_set_queue_limits(struct blkfront_info *info)
938 {
939         struct request_queue *rq = info->rq;
940         struct gendisk *gd = info->gd;
941         unsigned int segments = info->max_indirect_segments ? :
942                                 BLKIF_MAX_SEGMENTS_PER_REQUEST;
943
944         blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
945
946         if (info->feature_discard) {
947                 blk_queue_max_discard_sectors(rq, get_capacity(gd));
948                 rq->limits.discard_granularity = info->discard_granularity ?:
949                                                  info->physical_sector_size;
950                 rq->limits.discard_alignment = info->discard_alignment;
951                 if (info->feature_secdiscard)
952                         blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
953         }
954
955         /* Hard sector size and max sectors impersonate the equiv. hardware. */
956         blk_queue_logical_block_size(rq, info->sector_size);
957         blk_queue_physical_block_size(rq, info->physical_sector_size);
958         blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
959
960         /* Each segment in a request is up to an aligned page in size. */
961         blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
962         blk_queue_max_segment_size(rq, PAGE_SIZE);
963
964         /* Ensure a merged request will fit in a single I/O ring slot. */
965         blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
966
967         /* Make sure buffer addresses are sector-aligned. */
968         blk_queue_dma_alignment(rq, 511);
969 }
970
971 static const char *flush_info(struct blkfront_info *info)
972 {
973         if (info->feature_flush && info->feature_fua)
974                 return "barrier: enabled;";
975         else if (info->feature_flush)
976                 return "flush diskcache: enabled;";
977         else
978                 return "barrier or flush: disabled;";
979 }
980
981 static void xlvbd_flush(struct blkfront_info *info)
982 {
983         blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
984                               info->feature_fua ? true : false);
985         pr_info("blkfront: %s: %s %s %s %s %s\n",
986                 info->gd->disk_name, flush_info(info),
987                 "persistent grants:", info->feature_persistent ?
988                 "enabled;" : "disabled;", "indirect descriptors:",
989                 info->max_indirect_segments ? "enabled;" : "disabled;");
990 }
991
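/*
 * Translate a legacy (non-extended) vdevice number (emulated IDE/SCSI or
 * XENVBD major) into an xvd disk-name offset and minor number.
 */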
992 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
993 {
994         int major;
995         major = BLKIF_MAJOR(vdevice);
996         *minor = BLKIF_MINOR(vdevice);
997         switch (major) {
998                 case XEN_IDE0_MAJOR:
999                         *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
1000                         *minor = ((*minor / 64) * PARTS_PER_DISK) +
1001                                 EMULATED_HD_DISK_MINOR_OFFSET;
1002                         break;
1003                 case XEN_IDE1_MAJOR:
1004                         *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1005                         *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1006                                 EMULATED_HD_DISK_MINOR_OFFSET;
1007                         break;
1008                 case XEN_SCSI_DISK0_MAJOR:
1009                         *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1010                         *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1011                         break;
1012                 case XEN_SCSI_DISK1_MAJOR:
1013                 case XEN_SCSI_DISK2_MAJOR:
1014                 case XEN_SCSI_DISK3_MAJOR:
1015                 case XEN_SCSI_DISK4_MAJOR:
1016                 case XEN_SCSI_DISK5_MAJOR:
1017                 case XEN_SCSI_DISK6_MAJOR:
1018                 case XEN_SCSI_DISK7_MAJOR:
1019                         *offset = (*minor / PARTS_PER_DISK) + 
1020                                 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1021                                 EMULATED_SD_DISK_NAME_OFFSET;
1022                         *minor = *minor +
1023                                 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1024                                 EMULATED_SD_DISK_MINOR_OFFSET;
1025                         break;
1026                 case XEN_SCSI_DISK8_MAJOR:
1027                 case XEN_SCSI_DISK9_MAJOR:
1028                 case XEN_SCSI_DISK10_MAJOR:
1029                 case XEN_SCSI_DISK11_MAJOR:
1030                 case XEN_SCSI_DISK12_MAJOR:
1031                 case XEN_SCSI_DISK13_MAJOR:
1032                 case XEN_SCSI_DISK14_MAJOR:
1033                 case XEN_SCSI_DISK15_MAJOR:
1034                         *offset = (*minor / PARTS_PER_DISK) + 
1035                                 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1036                                 EMULATED_SD_DISK_NAME_OFFSET;
1037                         *minor = *minor +
1038                                 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1039                                 EMULATED_SD_DISK_MINOR_OFFSET;
1040                         break;
1041                 case XENVBD_MAJOR:
1042                         *offset = *minor / PARTS_PER_DISK;
1043                         break;
1044                 default:
1045                         printk(KERN_WARNING "blkfront: your disk configuration is "
1046                                         "incorrect, please use an xvd device instead\n");
1047                         return -ENODEV;
1048         }
1049         return 0;
1050 }
1051
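/*
 * Encode a disk index as a lower-case base-26 suffix (a..z, aa, ab, ...),
 * matching the xvd<suffix> naming scheme.
 */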
1052 static char *encode_disk_name(char *ptr, unsigned int n)
1053 {
1054         if (n >= 26)
1055                 ptr = encode_disk_name(ptr, n / 26 - 1);
1056         *ptr = 'a' + n % 26;
1057         return ptr + 1;
1058 }
1059
1060 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1061                 struct blkfront_info *info, u16 sector_size,
1062                 unsigned int physical_sector_size)
1063 {
1064         struct gendisk *gd;
1065         int nr_minors = 1;
1066         int err;
1067         unsigned int offset;
1068         int minor;
1069         int nr_parts;
1070         char *ptr;
1071
1072         BUG_ON(info->gd != NULL);
1073         BUG_ON(info->rq != NULL);
1074
1075         if ((info->vdevice>>EXT_SHIFT) > 1) {
1076                 /* this is above the extended range; something is wrong */
1077                 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1078                 return -ENODEV;
1079         }
1080
1081         if (!VDEV_IS_EXTENDED(info->vdevice)) {
1082                 err = xen_translate_vdev(info->vdevice, &minor, &offset);
1083                 if (err)
1084                         return err;
1085                 nr_parts = PARTS_PER_DISK;
1086         } else {
1087                 minor = BLKIF_MINOR_EXT(info->vdevice);
1088                 nr_parts = PARTS_PER_EXT_DISK;
1089                 offset = minor / nr_parts;
1090                 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
1091                         printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
1092                                         "emulated IDE disks,\n\t choose an xvd device name"
1093                                         "from xvde on\n", info->vdevice);
1094         }
1095         if (minor >> MINORBITS) {
1096                 pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1097                         info->vdevice, minor);
1098                 return -ENODEV;
1099         }
1100
1101         if ((minor % nr_parts) == 0)
1102                 nr_minors = nr_parts;
1103
1104         err = xlbd_reserve_minors(minor, nr_minors);
1105         if (err)
1106                 return err;
1107
1108         memset(&info->tag_set, 0, sizeof(info->tag_set));
1109         info->tag_set.ops = &blkfront_mq_ops;
1110         info->tag_set.nr_hw_queues = info->nr_rings;
1111         if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
1112                 /*
 1113                  * When indirect descriptors are not supported, the I/O request
 1114                  * will be split between multiple requests in the ring.
 1115                  * To avoid problems when sending the request, halve the
 1116                  * depth of the queue.
1117                  */
1118                 info->tag_set.queue_depth =  BLK_RING_SIZE(info) / 2;
1119         } else
1120                 info->tag_set.queue_depth = BLK_RING_SIZE(info);
1121         info->tag_set.numa_node = NUMA_NO_NODE;
1122         info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1123         info->tag_set.cmd_size = sizeof(struct blkif_req);
1124         info->tag_set.driver_data = info;
1125
1126         err = blk_mq_alloc_tag_set(&info->tag_set);
1127         if (err)
1128                 goto out_release_minors;
1129
1130         gd = blk_mq_alloc_disk(&info->tag_set, info);
1131         if (IS_ERR(gd)) {
1132                 err = PTR_ERR(gd);
1133                 goto out_free_tag_set;
1134         }
1135
1136         strcpy(gd->disk_name, DEV_NAME);
1137         ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1138         BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1139         if (nr_minors > 1)
1140                 *ptr = 0;
1141         else
1142                 snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1143                          "%d", minor & (nr_parts - 1));
1144
1145         gd->major = XENVBD_MAJOR;
1146         gd->first_minor = minor;
1147         gd->minors = nr_minors;
1148         gd->fops = &xlvbd_block_fops;
1149         gd->private_data = info;
1150         set_capacity(gd, capacity);
1151
1152         info->rq = gd->queue;
1153         info->gd = gd;
1154         info->sector_size = sector_size;
1155         info->physical_sector_size = physical_sector_size;
1156         blkif_set_queue_limits(info);
1157
1158         xlvbd_flush(info);
1159
1160         if (info->vdisk_info & VDISK_READONLY)
1161                 set_disk_ro(gd, 1);
1162         if (info->vdisk_info & VDISK_REMOVABLE)
1163                 gd->flags |= GENHD_FL_REMOVABLE;
1164
1165         return 0;
1166
1167 out_free_tag_set:
1168         blk_mq_free_tag_set(&info->tag_set);
1169 out_release_minors:
1170         xlbd_release_minors(minor, nr_minors);
1171         return err;
1172 }
1173
 1174 /* Must be called with rinfo->ring_lock held. */
1175 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1176 {
1177         if (!RING_FULL(&rinfo->ring))
1178                 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1179 }
1180
1181 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1182 {
1183         unsigned long flags;
1184
1185         spin_lock_irqsave(&rinfo->ring_lock, flags);
1186         kick_pending_request_queues_locked(rinfo);
1187         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1188 }
1189
1190 static void blkif_restart_queue(struct work_struct *work)
1191 {
1192         struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1193
1194         if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1195                 kick_pending_request_queues(rinfo);
1196 }
1197
1198 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1199 {
1200         struct grant *persistent_gnt, *n;
1201         struct blkfront_info *info = rinfo->dev_info;
1202         int i, j, segs;
1203
1204         /*
 1205          * Remove indirect pages; this only happens when using indirect
1206          * descriptors but not persistent grants
1207          */
1208         if (!list_empty(&rinfo->indirect_pages)) {
1209                 struct page *indirect_page, *n;
1210
1211                 BUG_ON(info->feature_persistent);
1212                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1213                         list_del(&indirect_page->lru);
1214                         __free_page(indirect_page);
1215                 }
1216         }
1217
1218         /* Remove all persistent grants. */
1219         if (!list_empty(&rinfo->grants)) {
1220                 list_for_each_entry_safe(persistent_gnt, n,
1221                                          &rinfo->grants, node) {
1222                         list_del(&persistent_gnt->node);
1223                         if (persistent_gnt->gref != GRANT_INVALID_REF) {
1224                                 gnttab_end_foreign_access(persistent_gnt->gref,
1225                                                           0UL);
1226                                 rinfo->persistent_gnts_c--;
1227                         }
1228                         if (info->feature_persistent)
1229                                 __free_page(persistent_gnt->page);
1230                         kfree(persistent_gnt);
1231                 }
1232         }
1233         BUG_ON(rinfo->persistent_gnts_c != 0);
1234
1235         for (i = 0; i < BLK_RING_SIZE(info); i++) {
1236                 /*
1237                  * Clear persistent grants present in requests already
1238                  * on the shared ring
1239                  */
1240                 if (!rinfo->shadow[i].request)
1241                         goto free_shadow;
1242
1243                 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1244                        rinfo->shadow[i].req.u.indirect.nr_segments :
1245                        rinfo->shadow[i].req.u.rw.nr_segments;
1246                 for (j = 0; j < segs; j++) {
1247                         persistent_gnt = rinfo->shadow[i].grants_used[j];
1248                         gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
1249                         if (info->feature_persistent)
1250                                 __free_page(persistent_gnt->page);
1251                         kfree(persistent_gnt);
1252                 }
1253
1254                 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1255                         /*
1256                          * If this is not an indirect operation don't try to
1257                          * free indirect segments
1258                          */
1259                         goto free_shadow;
1260
1261                 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1262                         persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1263                         gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
1264                         __free_page(persistent_gnt->page);
1265                         kfree(persistent_gnt);
1266                 }
1267
1268 free_shadow:
1269                 kvfree(rinfo->shadow[i].grants_used);
1270                 rinfo->shadow[i].grants_used = NULL;
1271                 kvfree(rinfo->shadow[i].indirect_grants);
1272                 rinfo->shadow[i].indirect_grants = NULL;
1273                 kvfree(rinfo->shadow[i].sg);
1274                 rinfo->shadow[i].sg = NULL;
1275         }
1276
1277         /* No more gnttab callback work. */
1278         gnttab_cancel_free_callback(&rinfo->callback);
1279
1280         /* Flush gnttab callback work. Must be done with no locks held. */
1281         flush_work(&rinfo->work);
1282
1283         /* Free resources associated with old device channel. */
1284         for (i = 0; i < info->nr_ring_pages; i++) {
1285                 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1286                         gnttab_end_foreign_access(rinfo->ring_ref[i], 0);
1287                         rinfo->ring_ref[i] = GRANT_INVALID_REF;
1288                 }
1289         }
1290         free_pages_exact(rinfo->ring.sring,
1291                          info->nr_ring_pages * XEN_PAGE_SIZE);
1292         rinfo->ring.sring = NULL;
1293
1294         if (rinfo->irq)
1295                 unbind_from_irqhandler(rinfo->irq, rinfo);
1296         rinfo->evtchn = rinfo->irq = 0;
1297 }
1298
1299 static void blkif_free(struct blkfront_info *info, int suspend)
1300 {
1301         unsigned int i;
1302         struct blkfront_ring_info *rinfo;
1303
1304         /* Prevent new requests being issued until we fix things up. */
1305         info->connected = suspend ?
1306                 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1307         /* No more blkif_request(). */
1308         if (info->rq)
1309                 blk_mq_stop_hw_queues(info->rq);
1310
1311         for_each_rinfo(info, rinfo, i)
1312                 blkif_free_ring(rinfo);
1313
1314         kvfree(info->rinfo);
1315         info->rinfo = NULL;
1316         info->nr_rings = 0;
1317 }
1318
1319 struct copy_from_grant {
1320         const struct blk_shadow *s;
1321         unsigned int grant_idx;
1322         unsigned int bvec_offset;
1323         char *bvec_data;
1324 };
1325
1326 static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1327                                   unsigned int len, void *data)
1328 {
1329         struct copy_from_grant *info = data;
1330         char *shared_data;
1331         /* Convenient aliases */
1332         const struct blk_shadow *s = info->s;
1333
1334         shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1335
1336         memcpy(info->bvec_data + info->bvec_offset,
1337                shared_data + offset, len);
1338
1339         info->bvec_offset += len;
1340         info->grant_idx++;
1341
1342         kunmap_atomic(shared_data);
1343 }
1344
1345 static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1346 {
1347         switch (rsp)
1348         {
1349         case BLKIF_RSP_OKAY:
1350                 return REQ_DONE;
1351         case BLKIF_RSP_EOPNOTSUPP:
1352                 return REQ_EOPNOTSUPP;
1353         case BLKIF_RSP_ERROR:
1354         default:
1355                 return REQ_ERROR;
1356         }
1357 }
1358
1359 /*
 1360  * Get the final status of the block request based on the two ring responses.
1361  */
1362 static int blkif_get_final_status(enum blk_req_status s1,
1363                                   enum blk_req_status s2)
1364 {
1365         BUG_ON(s1 < REQ_DONE);
1366         BUG_ON(s2 < REQ_DONE);
1367
1368         if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1369                 return BLKIF_RSP_ERROR;
1370         else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1371                 return BLKIF_RSP_EOPNOTSUPP;
1372         return BLKIF_RSP_OKAY;
1373 }
1374
1375 /*
1376  * Return values:
1377  *  1 response processed.
1378  *  0 missing further responses.
1379  * -1 error while processing.
1380  */
1381 static int blkif_completion(unsigned long *id,
1382                             struct blkfront_ring_info *rinfo,
1383                             struct blkif_response *bret)
1384 {
1385         int i = 0;
1386         struct scatterlist *sg;
1387         int num_sg, num_grant;
1388         struct blkfront_info *info = rinfo->dev_info;
1389         struct blk_shadow *s = &rinfo->shadow[*id];
1390         struct copy_from_grant data = {
1391                 .grant_idx = 0,
1392         };
1393
1394         num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1395                 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1396
1397         /* The I/O request may be split in two. */
1398         if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1399                 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1400
1401                 /* Keep the status of the current response in shadow. */
1402                 s->status = blkif_rsp_to_req_status(bret->status);
1403
1404                 /* Wait for the second response if it is not here yet. */
1405                 if (s2->status < REQ_DONE)
1406                         return 0;
1407
1408                 bret->status = blkif_get_final_status(s->status,
1409                                                       s2->status);
1410
1411                 /*
1412                  * All the grants are stored in the first shadow in order
1413                  * to make the completion code simpler.
1414                  */
1415                 num_grant += s2->req.u.rw.nr_segments;
1416
1417                 /*
1418                  * The two responses may not come in order. Only the
1419                  * first request will store the scatter-gather list.
1420                  */
1421                 if (s2->num_sg != 0) {
1422                         /* Update "id" with the ID of the first response. */
1423                         *id = s->associated_id;
1424                         s = s2;
1425                 }
1426
1427                 /*
1428                  * We no longer need the second request, so recycle
1429                  * it now.
1430                  */
1431                 if (add_id_to_freelist(rinfo, s->associated_id))
1432                         WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1433                              info->gd->disk_name, s->associated_id);
1434         }
1435
1436         data.s = s;
1437         num_sg = s->num_sg;
1438
1439         if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1440                 for_each_sg(s->sg, sg, num_sg, i) {
1441                         BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1442
1443                         data.bvec_offset = sg->offset;
1444                         data.bvec_data = kmap_atomic(sg_page(sg));
1445
1446                         gnttab_foreach_grant_in_range(sg_page(sg),
1447                                                       sg->offset,
1448                                                       sg->length,
1449                                                       blkif_copy_from_grant,
1450                                                       &data);
1451
1452                         kunmap_atomic(data.bvec_data);
1453                 }
1454         }
1455         /* Return the grants used by this request to the list of free grants. */
1456         for (i = 0; i < num_grant; i++) {
1457                 if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
1458                         /*
1459                          * If the grant is still mapped by the backend (the
1460                          * backend has chosen to make this grant persistent)
1461                          * we add it at the head of the list, so it will be
1462                          * reused first.
1463                          */
1464                         if (!info->feature_persistent) {
1465                                 pr_alert("backend has not unmapped grant: %u\n",
1466                                          s->grants_used[i]->gref);
1467                                 return -1;
1468                         }
1469                         list_add(&s->grants_used[i]->node, &rinfo->grants);
1470                         rinfo->persistent_gnts_c++;
1471                 } else {
1472                         /*
1473                          * If the grant is not mapped by the backend we add it
1474                          * to the tail of the list, so it will not be picked
1475                          * again unless we run out of persistent grants.
1476                          */
1477                         s->grants_used[i]->gref = GRANT_INVALID_REF;
1478                         list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1479                 }
1480         }
1481         if (s->req.operation == BLKIF_OP_INDIRECT) {
1482                 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1483                         if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
1484                                 if (!info->feature_persistent) {
1485                                         pr_alert("backend has not unmapped grant: %u\n",
1486                                                  s->indirect_grants[i]->gref);
1487                                         return -1;
1488                                 }
1489                                 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1490                                 rinfo->persistent_gnts_c++;
1491                         } else {
1492                                 struct page *indirect_page;
1493
1494                                 /*
1495                                  * Add the used indirect page back to the list of
1496                                  * available pages for indirect grefs.
1497                                  */
1498                                 if (!info->feature_persistent) {
1499                                         indirect_page = s->indirect_grants[i]->page;
1500                                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
1501                                 }
1502                                 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1503                                 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1504                         }
1505                 }
1506         }
1507
1508         return 1;
1509 }
1510
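/*
 * Event channel interrupt handler: consume all pending responses from the
 * ring, validate them against the shadow state, complete the corresponding
 * block layer requests and signal EOI.  Any malformed response from the
 * backend switches the device to BLKIF_STATE_ERROR and disables it.
 */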
1511 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1512 {
1513         struct request *req;
1514         struct blkif_response bret;
1515         RING_IDX i, rp;
1516         unsigned long flags;
1517         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1518         struct blkfront_info *info = rinfo->dev_info;
1519         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1520
1521         if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
1522                 xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
1523                 return IRQ_HANDLED;
1524         }
1525
1526         spin_lock_irqsave(&rinfo->ring_lock, flags);
1527  again:
1528         rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
1529         virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
1530         if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
1531                 pr_alert("%s: illegal number of responses %u\n",
1532                          info->gd->disk_name, rp - rinfo->ring.rsp_cons);
1533                 goto err;
1534         }
1535
1536         for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1537                 unsigned long id;
1538                 unsigned int op;
1539
1540                 eoiflag = 0;
1541
1542                 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
1543                 id = bret.id;
1544
1545                 /*
1546                  * The backend has messed up and given us an id that we would
1547                  * never have given to it (we stamp it up to BLK_RING_SIZE;
1548                  * see get_id_from_freelist()).
1549                  */
1550                 if (id >= BLK_RING_SIZE(info)) {
1551                         pr_alert("%s: response has incorrect id (%ld)\n",
1552                                  info->gd->disk_name, id);
1553                         goto err;
1554                 }
1555                 if (rinfo->shadow[id].status != REQ_WAITING) {
1556                         pr_alert("%s: response references no pending request\n",
1557                                  info->gd->disk_name);
1558                         goto err;
1559                 }
1560
1561                 rinfo->shadow[id].status = REQ_PROCESSING;
1562                 req  = rinfo->shadow[id].request;
1563
1564                 op = rinfo->shadow[id].req.operation;
1565                 if (op == BLKIF_OP_INDIRECT)
1566                         op = rinfo->shadow[id].req.u.indirect.indirect_op;
1567                 if (bret.operation != op) {
1568                         pr_alert("%s: response has wrong operation (%u instead of %u)\n",
1569                                  info->gd->disk_name, bret.operation, op);
1570                         goto err;
1571                 }
1572
1573                 if (bret.operation != BLKIF_OP_DISCARD) {
1574                         int ret;
1575
1576                         /*
1577                          * We may need to wait for an extra response if the
1578                          * I/O request is split in two.
1579                          */
1580                         ret = blkif_completion(&id, rinfo, &bret);
1581                         if (!ret)
1582                                 continue;
1583                         if (unlikely(ret < 0))
1584                                 goto err;
1585                 }
1586
1587                 if (add_id_to_freelist(rinfo, id)) {
1588                         WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1589                              info->gd->disk_name, op_name(bret.operation), id);
1590                         continue;
1591                 }
1592
1593                 if (bret.status == BLKIF_RSP_OKAY)
1594                         blkif_req(req)->error = BLK_STS_OK;
1595                 else
1596                         blkif_req(req)->error = BLK_STS_IOERR;
1597
1598                 switch (bret.operation) {
1599                 case BLKIF_OP_DISCARD:
1600                         if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1601                                 struct request_queue *rq = info->rq;
1602
1603                                 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1604                                            info->gd->disk_name, op_name(bret.operation));
1605                                 blkif_req(req)->error = BLK_STS_NOTSUPP;
1606                                 info->feature_discard = 0;
1607                                 info->feature_secdiscard = 0;
1608                                 blk_queue_max_discard_sectors(rq, 0);
1609                                 blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1610                         }
1611                         break;
1612                 case BLKIF_OP_FLUSH_DISKCACHE:
1613                 case BLKIF_OP_WRITE_BARRIER:
1614                         if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1615                                 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1616                                        info->gd->disk_name, op_name(bret.operation));
1617                                 blkif_req(req)->error = BLK_STS_NOTSUPP;
1618                         }
1619                         if (unlikely(bret.status == BLKIF_RSP_ERROR &&
1620                                      rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1621                                 pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
1622                                        info->gd->disk_name, op_name(bret.operation));
1623                                 blkif_req(req)->error = BLK_STS_NOTSUPP;
1624                         }
1625                         if (unlikely(blkif_req(req)->error)) {
1626                                 if (blkif_req(req)->error == BLK_STS_NOTSUPP)
1627                                         blkif_req(req)->error = BLK_STS_OK;
1628                                 info->feature_fua = 0;
1629                                 info->feature_flush = 0;
1630                                 xlvbd_flush(info);
1631                         }
1632                         fallthrough;
1633                 case BLKIF_OP_READ:
1634                 case BLKIF_OP_WRITE:
1635                         if (unlikely(bret.status != BLKIF_RSP_OKAY))
1636                                 dev_dbg_ratelimited(&info->xbdev->dev,
1637                                         "Bad return from blkdev data request: %#x\n",
1638                                         bret.status);
1639
1640                         break;
1641                 default:
1642                         BUG();
1643                 }
1644
1645                 if (likely(!blk_should_fake_timeout(req->q)))
1646                         blk_mq_complete_request(req);
1647         }
1648
1649         rinfo->ring.rsp_cons = i;
1650
1651         if (i != rinfo->ring.req_prod_pvt) {
1652                 int more_to_do;
1653                 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1654                 if (more_to_do)
1655                         goto again;
1656         } else
1657                 rinfo->ring.sring->rsp_event = i + 1;
1658
1659         kick_pending_request_queues_locked(rinfo);
1660
1661         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1662
1663         xen_irq_lateeoi(irq, eoiflag);
1664
1665         return IRQ_HANDLED;
1666
1667  err:
1668         info->connected = BLKIF_STATE_ERROR;
1669
1670         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1671
1672         /* No EOI in order to avoid further interrupts. */
1673
1674         pr_alert("%s disabled for further use\n", info->gd->disk_name);
1675         return IRQ_HANDLED;
1676 }
1677
1678
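/*
 * Allocate the shared ring for one queue, grant its pages to the backend
 * and bind the event channel used for notifications.
 */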
1679 static int setup_blkring(struct xenbus_device *dev,
1680                          struct blkfront_ring_info *rinfo)
1681 {
1682         struct blkif_sring *sring;
1683         int err, i;
1684         struct blkfront_info *info = rinfo->dev_info;
1685         unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1686         grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1687
1688         for (i = 0; i < info->nr_ring_pages; i++)
1689                 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1690
1691         sring = alloc_pages_exact(ring_size, GFP_NOIO);
1692         if (!sring) {
1693                 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1694                 return -ENOMEM;
1695         }
1696         SHARED_RING_INIT(sring);
1697         FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1698
1699         err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1700         if (err < 0) {
1701                 free_pages_exact(sring, ring_size);
1702                 rinfo->ring.sring = NULL;
1703                 goto fail;
1704         }
1705         for (i = 0; i < info->nr_ring_pages; i++)
1706                 rinfo->ring_ref[i] = gref[i];
1707
1708         err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1709         if (err)
1710                 goto fail;
1711
1712         err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
1713                                                 0, "blkif", rinfo);
1714         if (err <= 0) {
1715                 xenbus_dev_fatal(dev, err,
1716                                  "bind_evtchn_to_irqhandler failed");
1717                 goto fail;
1718         }
1719         rinfo->irq = err;
1720
1721         return 0;
1722 fail:
1723         blkif_free(info, 0);
1724         return err;
1725 }
1726
1727 /*
1728  * Write out the per-ring/queue nodes, including ring-ref and event-channel;
1729  * each ring buffer may span multiple pages depending on ->nr_ring_pages.
1730  */
1731 static int write_per_ring_nodes(struct xenbus_transaction xbt,
1732                                 struct blkfront_ring_info *rinfo, const char *dir)
1733 {
1734         int err;
1735         unsigned int i;
1736         const char *message = NULL;
1737         struct blkfront_info *info = rinfo->dev_info;
1738
1739         if (info->nr_ring_pages == 1) {
1740                 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1741                 if (err) {
1742                         message = "writing ring-ref";
1743                         goto abort_transaction;
1744                 }
1745         } else {
1746                 for (i = 0; i < info->nr_ring_pages; i++) {
1747                         char ring_ref_name[RINGREF_NAME_LEN];
1748
1749                         snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1750                         err = xenbus_printf(xbt, dir, ring_ref_name,
1751                                             "%u", rinfo->ring_ref[i]);
1752                         if (err) {
1753                                 message = "writing ring-ref";
1754                                 goto abort_transaction;
1755                         }
1756                 }
1757         }
1758
1759         err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1760         if (err) {
1761                 message = "writing event-channel";
1762                 goto abort_transaction;
1763         }
1764
1765         return 0;
1766
1767 abort_transaction:
1768         xenbus_transaction_end(xbt, 1);
1769         if (message)
1770                 xenbus_dev_fatal(info->xbdev, err, "%s", message);
1771
1772         return err;
1773 }
1774
1775 /* Common code used when first setting up, and when resuming. */
1776 static int talk_to_blkback(struct xenbus_device *dev,
1777                            struct blkfront_info *info)
1778 {
1779         const char *message = NULL;
1780         struct xenbus_transaction xbt;
1781         int err;
1782         unsigned int i, max_page_order;
1783         unsigned int ring_page_order;
1784         struct blkfront_ring_info *rinfo;
1785
1786         if (!info)
1787                 return -ENODEV;
1788
1789         max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1790                                               "max-ring-page-order", 0);
1791         ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1792         info->nr_ring_pages = 1 << ring_page_order;
1793
1794         err = negotiate_mq(info);
1795         if (err)
1796                 goto destroy_blkring;
1797
1798         for_each_rinfo(info, rinfo, i) {
1799                 /* Create shared ring, alloc event channel. */
1800                 err = setup_blkring(dev, rinfo);
1801                 if (err)
1802                         goto destroy_blkring;
1803         }
1804
1805 again:
1806         err = xenbus_transaction_start(&xbt);
1807         if (err) {
1808                 xenbus_dev_fatal(dev, err, "starting transaction");
1809                 goto destroy_blkring;
1810         }
1811
1812         if (info->nr_ring_pages > 1) {
1813                 err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1814                                     ring_page_order);
1815                 if (err) {
1816                         message = "writing ring-page-order";
1817                         goto abort_transaction;
1818                 }
1819         }
1820
1821         /* The number of queues/rings was determined by negotiate_mq() above. */
1822         if (info->nr_rings == 1) {
1823                 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
1824                 if (err)
1825                         goto destroy_blkring;
1826         } else {
1827                 char *path;
1828                 size_t pathsize;
1829
1830                 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1831                                     info->nr_rings);
1832                 if (err) {
1833                         message = "writing multi-queue-num-queues";
1834                         goto abort_transaction;
1835                 }
1836
1837                 pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1838                 path = kmalloc(pathsize, GFP_KERNEL);
1839                 if (!path) {
1840                         err = -ENOMEM;
1841                         message = "ENOMEM while writing ring references";
1842                         goto abort_transaction;
1843                 }
1844
1845                 for_each_rinfo(info, rinfo, i) {
1846                         memset(path, 0, pathsize);
1847                         snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1848                         err = write_per_ring_nodes(xbt, rinfo, path);
1849                         if (err) {
1850                                 kfree(path);
1851                                 goto destroy_blkring;
1852                         }
1853                 }
1854                 kfree(path);
1855         }
1856         err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1857                             XEN_IO_PROTO_ABI_NATIVE);
1858         if (err) {
1859                 message = "writing protocol";
1860                 goto abort_transaction;
1861         }
1862         err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
1863                         info->feature_persistent);
1864         if (err)
1865                 dev_warn(&dev->dev,
1866                          "writing persistent grants feature to xenbus");
1867
1868         err = xenbus_transaction_end(xbt, 0);
1869         if (err) {
1870                 if (err == -EAGAIN)
1871                         goto again;
1872                 xenbus_dev_fatal(dev, err, "completing transaction");
1873                 goto destroy_blkring;
1874         }
1875
1876         for_each_rinfo(info, rinfo, i) {
1877                 unsigned int j;
1878
1879                 for (j = 0; j < BLK_RING_SIZE(info); j++)
1880                         rinfo->shadow[j].req.u.rw.id = j + 1;
1881                 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1882         }
1883         xenbus_switch_state(dev, XenbusStateInitialised);
1884
1885         return 0;
1886
1887  abort_transaction:
1888         xenbus_transaction_end(xbt, 1);
1889         if (message)
1890                 xenbus_dev_fatal(dev, err, "%s", message);
1891  destroy_blkring:
1892         blkif_free(info, 0);
1893         return err;
1894 }
1895
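/*
 * Negotiate the number of queues/rings to use with the backend and
 * allocate the per-ring blkfront_ring_info structures.
 */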
1896 static int negotiate_mq(struct blkfront_info *info)
1897 {
1898         unsigned int backend_max_queues;
1899         unsigned int i;
1900         struct blkfront_ring_info *rinfo;
1901
1902         BUG_ON(info->nr_rings);
1903
1904         /* Check if backend supports multiple queues. */
1905         backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1906                                                   "multi-queue-max-queues", 1);
1907         info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1908         /* We need at least one ring. */
1909         if (!info->nr_rings)
1910                 info->nr_rings = 1;
1911
1912         info->rinfo_size = struct_size(info->rinfo, shadow,
1913                                        BLK_RING_SIZE(info));
1914         info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
1915         if (!info->rinfo) {
1916                 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1917                 info->nr_rings = 0;
1918                 return -ENOMEM;
1919         }
1920
1921         for_each_rinfo(info, rinfo, i) {
1922                 INIT_LIST_HEAD(&rinfo->indirect_pages);
1923                 INIT_LIST_HEAD(&rinfo->grants);
1924                 rinfo->dev_info = info;
1925                 INIT_WORK(&rinfo->work, blkif_restart_queue);
1926                 spin_lock_init(&rinfo->ring_lock);
1927         }
1928         return 0;
1929 }
1930
1931 /* Enable the persistent grants feature. */
1932 static bool feature_persistent = true;
1933 module_param(feature_persistent, bool, 0644);
1934 MODULE_PARM_DESC(feature_persistent,
1935                 "Enables the persistent grants feature");
1936
1937 /*
1938  * Entry point to this code when a new device is created.  Allocate the
1939  * basic blkfront_info structure and register the device; the shared ring
1940  * and the rest of the backend handshake are set up later, from
1941  * talk_to_blkback(), once the backend signals it is ready.
1942  */
1943 static int blkfront_probe(struct xenbus_device *dev,
1944                           const struct xenbus_device_id *id)
1945 {
1946         int err, vdevice;
1947         struct blkfront_info *info;
1948
1949         /* FIXME: Use dynamic device id if this is not set. */
1950         err = xenbus_scanf(XBT_NIL, dev->nodename,
1951                            "virtual-device", "%i", &vdevice);
1952         if (err != 1) {
1953                 /* go looking in the extended area instead */
1954                 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
1955                                    "%i", &vdevice);
1956                 if (err != 1) {
1957                         xenbus_dev_fatal(dev, err, "reading virtual-device");
1958                         return err;
1959                 }
1960         }
1961
1962         if (xen_hvm_domain()) {
1963                 char *type;
1964                 int len;
1965                 /* no unplug has been done: do not hook devices != xen vbds */
1966                 if (xen_has_pv_and_legacy_disk_devices()) {
1967                         int major;
1968
1969                         if (!VDEV_IS_EXTENDED(vdevice))
1970                                 major = BLKIF_MAJOR(vdevice);
1971                         else
1972                                 major = XENVBD_MAJOR;
1973
1974                         if (major != XENVBD_MAJOR) {
1975                                 printk(KERN_INFO
1976                                                 "%s: HVM does not support vbd %d as xen block device\n",
1977                                                 __func__, vdevice);
1978                                 return -ENODEV;
1979                         }
1980                 }
1981                 /* do not create a PV cdrom device if we are an HVM guest */
1982                 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
1983                 if (IS_ERR(type))
1984                         return -ENODEV;
1985                 if (strncmp(type, "cdrom", 5) == 0) {
1986                         kfree(type);
1987                         return -ENODEV;
1988                 }
1989                 kfree(type);
1990         }
1991         info = kzalloc(sizeof(*info), GFP_KERNEL);
1992         if (!info) {
1993                 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
1994                 return -ENOMEM;
1995         }
1996
1997         info->xbdev = dev;
1998
1999         mutex_init(&info->mutex);
2000         info->vdevice = vdevice;
2001         info->connected = BLKIF_STATE_DISCONNECTED;
2002
2003         info->feature_persistent = feature_persistent;
2004
2005         /* Front end dir is a number, which is used as the id. */
2006         info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
2007         dev_set_drvdata(&dev->dev, info);
2008
2009         mutex_lock(&blkfront_mutex);
2010         list_add(&info->info_list, &info_list);
2011         mutex_unlock(&blkfront_mutex);
2012
2013         return 0;
2014 }
2015
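/*
 * Rebuild the ring state after a resume: re-read the backend features,
 * re-allocate the indirect and grant buffers, and requeue the requests and
 * bios that were in flight when the frontend was suspended.
 */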
2016 static int blkif_recover(struct blkfront_info *info)
2017 {
2018         unsigned int r_index;
2019         struct request *req, *n;
2020         int rc;
2021         struct bio *bio;
2022         unsigned int segs;
2023         struct blkfront_ring_info *rinfo;
2024
2025         blkfront_gather_backend_features(info);
2026         /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
2027         blkif_set_queue_limits(info);
2028         segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2029         blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
2030
2031         for_each_rinfo(info, rinfo, r_index) {
2032                 rc = blkfront_setup_indirect(rinfo);
2033                 if (rc)
2034                         return rc;
2035         }
2036         xenbus_switch_state(info->xbdev, XenbusStateConnected);
2037
2038         /* Now safe for us to use the shared ring */
2039         info->connected = BLKIF_STATE_CONNECTED;
2040
2041         for_each_rinfo(info, rinfo, r_index) {
2042                 /* Kick any other new requests queued since we resumed */
2043                 kick_pending_request_queues(rinfo);
2044         }
2045
2046         list_for_each_entry_safe(req, n, &info->requests, queuelist) {
2047                 /* Requeue pending requests (flush or discard) */
2048                 list_del_init(&req->queuelist);
2049                 BUG_ON(req->nr_phys_segments > segs);
2050                 blk_mq_requeue_request(req, false);
2051         }
2052         blk_mq_start_stopped_hw_queues(info->rq, true);
2053         blk_mq_kick_requeue_list(info->rq);
2054
2055         while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
2056                 /* Traverse the list of pending bios and re-queue them */
2057                 submit_bio(bio);
2058         }
2059
2060         return 0;
2061 }
2062
2063 /*
2064  * We are reconnecting to the backend, due to a suspend/resume, or a backend
2065  * driver restart.  We tear down our blkif structure and recreate it, but
2066  * leave the device-layer structures intact so that this is transparent to the
2067  * rest of the kernel.
2068  */
2069 static int blkfront_resume(struct xenbus_device *dev)
2070 {
2071         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2072         int err = 0;
2073         unsigned int i, j;
2074         struct blkfront_ring_info *rinfo;
2075
2076         dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2077
2078         bio_list_init(&info->bio_list);
2079         INIT_LIST_HEAD(&info->requests);
2080         for_each_rinfo(info, rinfo, i) {
2081                 struct bio_list merge_bio;
2082                 struct blk_shadow *shadow = rinfo->shadow;
2083
2084                 for (j = 0; j < BLK_RING_SIZE(info); j++) {
2085                         /* Not in use? */
2086                         if (!shadow[j].request)
2087                                 continue;
2088
2089                         /*
2090                          * Get the bios in the request so we can re-queue them.
2091                          */
2092                         if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
2093                             req_op(shadow[j].request) == REQ_OP_DISCARD ||
2094                             req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
2095                             shadow[j].request->cmd_flags & REQ_FUA) {
2096                                 /*
2097                                  * Flush operations don't contain bios, so
2098                                  * we need to requeue the whole request.
2099                                  *
2100                                  * XXX: but this doesn't make any sense for a
2101                                  * write with the FUA flag set.
2102                                  */
2103                                 list_add(&shadow[j].request->queuelist, &info->requests);
2104                                 continue;
2105                         }
2106                         merge_bio.head = shadow[j].request->bio;
2107                         merge_bio.tail = shadow[j].request->biotail;
2108                         bio_list_merge(&info->bio_list, &merge_bio);
2109                         shadow[j].request->bio = NULL;
2110                         blk_mq_end_request(shadow[j].request, BLK_STS_OK);
2111                 }
2112         }
2113
2114         blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2115
2116         err = talk_to_blkback(dev, info);
2117         if (!err)
2118                 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2119
2120         /*
2121          * We have to wait for the backend to switch to
2122          * connected state, since we want to read which
2123          * features it supports.
2124          */
2125
2126         return err;
2127 }
2128
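/*
 * Quiesce the device in response to the backend closing: stop the queues,
 * mark the disk dead, cancel outstanding gnttab callbacks and complete the
 * frontend side of the close handshake.
 */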
2129 static void blkfront_closing(struct blkfront_info *info)
2130 {
2131         struct xenbus_device *xbdev = info->xbdev;
2132         struct blkfront_ring_info *rinfo;
2133         unsigned int i;
2134
2135         if (xbdev->state == XenbusStateClosing)
2136                 return;
2137
2138         /* No more blkif_request(). */
2139         blk_mq_stop_hw_queues(info->rq);
2140         blk_mark_disk_dead(info->gd);
2141         set_capacity(info->gd, 0);
2142
2143         for_each_rinfo(info, rinfo, i) {
2144                 /* No more gnttab callback work. */
2145                 gnttab_cancel_free_callback(&rinfo->callback);
2146
2147                 /* Flush gnttab callback work. Must be done with no locks held. */
2148                 flush_work(&rinfo->work);
2149         }
2150
2151         xenbus_frontend_closed(xbdev);
2152 }
2153
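/* Read the discard/secure-erase properties advertised by the backend. */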
2154 static void blkfront_setup_discard(struct blkfront_info *info)
2155 {
2156         info->feature_discard = 1;
2157         info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
2158                                                          "discard-granularity",
2159                                                          0);
2160         info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
2161                                                        "discard-alignment", 0);
2162         info->feature_secdiscard =
2163                 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
2164                                        0);
2165 }
2166
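/*
 * Allocate the per-request grant, scatterlist and indirect descriptor
 * buffers for one ring, sized according to the negotiated maximum number
 * of segments.
 */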
2167 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2168 {
2169         unsigned int psegs, grants, memflags;
2170         int err, i;
2171         struct blkfront_info *info = rinfo->dev_info;
2172
2173         memflags = memalloc_noio_save();
2174
2175         if (info->max_indirect_segments == 0) {
2176                 if (!HAS_EXTRA_REQ)
2177                         grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2178                 else {
2179                         /*
2180                          * When an extra req is required, the maximum
2181                          * number of grants supported is limited by the
2182                          * size of the Linux block segment.
2183                          */
2184                         grants = GRANTS_PER_PSEG;
2185                 }
2186         } else
2188                 grants = info->max_indirect_segments;
2189         psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
2190
2191         err = fill_grant_buffer(rinfo,
2192                                 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
2193         if (err)
2194                 goto out_of_memory;
2195
2196         if (!info->feature_persistent && info->max_indirect_segments) {
2197                 /*
2198                  * We are using indirect descriptors but not persistent
2199                  * grants, so we need to allocate a set of pages that can
2200                  * be used for mapping indirect grefs.
2201                  */
2202                 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
2203
2204                 BUG_ON(!list_empty(&rinfo->indirect_pages));
2205                 for (i = 0; i < num; i++) {
2206                         struct page *indirect_page = alloc_page(GFP_KERNEL);
2207                         if (!indirect_page)
2208                                 goto out_of_memory;
2209                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
2210                 }
2211         }
2212
2213         for (i = 0; i < BLK_RING_SIZE(info); i++) {
2214                 rinfo->shadow[i].grants_used =
2215                         kvcalloc(grants,
2216                                  sizeof(rinfo->shadow[i].grants_used[0]),
2217                                  GFP_KERNEL);
2218                 rinfo->shadow[i].sg = kvcalloc(psegs,
2219                                                sizeof(rinfo->shadow[i].sg[0]),
2220                                                GFP_KERNEL);
2221                 if (info->max_indirect_segments)
2222                         rinfo->shadow[i].indirect_grants =
2223                                 kvcalloc(INDIRECT_GREFS(grants),
2224                                          sizeof(rinfo->shadow[i].indirect_grants[0]),
2225                                          GFP_KERNEL);
2226                 if ((rinfo->shadow[i].grants_used == NULL) ||
2227                     (rinfo->shadow[i].sg == NULL) ||
2228                     (info->max_indirect_segments &&
2229                      (rinfo->shadow[i].indirect_grants == NULL)))
2230                         goto out_of_memory;
2231                 sg_init_table(rinfo->shadow[i].sg, psegs);
2232         }
2233
2234         memalloc_noio_restore(memflags);
2235
2236         return 0;
2237
2238 out_of_memory:
2239         for (i = 0; i < BLK_RING_SIZE(info); i++) {
2240                 kvfree(rinfo->shadow[i].grants_used);
2241                 rinfo->shadow[i].grants_used = NULL;
2242                 kvfree(rinfo->shadow[i].sg);
2243                 rinfo->shadow[i].sg = NULL;
2244                 kvfree(rinfo->shadow[i].indirect_grants);
2245                 rinfo->shadow[i].indirect_grants = NULL;
2246         }
2247         if (!list_empty(&rinfo->indirect_pages)) {
2248                 struct page *indirect_page, *n;
2249                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
2250                         list_del(&indirect_page->lru);
2251                         __free_page(indirect_page);
2252                 }
2253         }
2254
2255         memalloc_noio_restore(memflags);
2256
2257         return -ENOMEM;
2258 }
2259
2260 /*
2261  * Gather all the backend feature-* nodes advertised in xenstore.
2262  */
2263 static void blkfront_gather_backend_features(struct blkfront_info *info)
2264 {
2265         unsigned int indirect_segments;
2266
2267         info->feature_flush = 0;
2268         info->feature_fua = 0;
2269
2270         /*
2271          * If there's no "feature-barrier" defined, then it means
2272          * we're dealing with a very old backend which writes
2273          * synchronously; nothing to do.
2274          *
2275          * If there are barriers, then we use flush.
2276          */
2277         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
2278                 info->feature_flush = 1;
2279                 info->feature_fua = 1;
2280         }
2281
2282         /*
2283          * And if "feature-flush-cache" is present, prefer it over
2284          * barriers.
2285          */
2286         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
2287                                  0)) {
2288                 info->feature_flush = 1;
2289                 info->feature_fua = 0;
2290         }
2291
2292         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
2293                 blkfront_setup_discard(info);
2294
2295         if (info->feature_persistent)
2296                 info->feature_persistent =
2297                         !!xenbus_read_unsigned(info->xbdev->otherend,
2298                                                "feature-persistent", 0);
2299
2300         indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
2301                                         "feature-max-indirect-segments", 0);
2302         if (indirect_segments > xen_blkif_max_segments)
2303                 indirect_segments = xen_blkif_max_segments;
2304         if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2305                 indirect_segments = 0;
2306         info->max_indirect_segments = indirect_segments;
2307
2308         if (info->feature_persistent) {
2309                 mutex_lock(&blkfront_mutex);
2310                 schedule_delayed_work(&blkfront_work, HZ * 10);
2311                 mutex_unlock(&blkfront_mutex);
2312         }
2313 }
2314
2315 /*
2316  * Invoked when the backend is finally 'ready' (and has produced
2317  * the details about the physical device - #sectors, size, etc).
2318  */
2319 static void blkfront_connect(struct blkfront_info *info)
2320 {
2321         unsigned long long sectors;
2322         unsigned long sector_size;
2323         unsigned int physical_sector_size;
2324         int err, i;
2325         struct blkfront_ring_info *rinfo;
2326
2327         switch (info->connected) {
2328         case BLKIF_STATE_CONNECTED:
2329                 /*
2330                  * Potentially, the back-end may be signalling
2331                  * a capacity change; update the capacity.
2332                  */
2333                 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2334                                    "sectors", "%Lu", &sectors);
2335                 if (XENBUS_EXIST_ERR(err))
2336                         return;
2337                 printk(KERN_INFO "Setting capacity to %Lu\n",
2338                        sectors);
2339                 set_capacity_and_notify(info->gd, sectors);
2340
2341                 return;
2342         case BLKIF_STATE_SUSPENDED:
2343                 /*
2344                  * If we are recovering from suspension, we need to wait
2345                  * for the backend to announce its features before
2346                  * reconnecting; at least we need to know whether the backend
2347                  * supports indirect descriptors, and how many.
2348                  */
2349                 blkif_recover(info);
2350                 return;
2351
2352         default:
2353                 break;
2354         }
2355
2356         dev_dbg(&info->xbdev->dev, "%s:%s.\n",
2357                 __func__, info->xbdev->otherend);
2358
2359         err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2360                             "sectors", "%llu", &sectors,
2361                             "info", "%u", &info->vdisk_info,
2362                             "sector-size", "%lu", &sector_size,
2363                             NULL);
2364         if (err) {
2365                 xenbus_dev_fatal(info->xbdev, err,
2366                                  "reading backend fields at %s",
2367                                  info->xbdev->otherend);
2368                 return;
2369         }
2370
2371         /*
2372          * physical-sector-size is a newer field, so old backends may not
2373          * provide this. Assume physical sector size to be the same as
2374          * sector_size in that case.
2375          */
2376         physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
2377                                                     "physical-sector-size",
2378                                                     sector_size);
2379         blkfront_gather_backend_features(info);
2380         for_each_rinfo(info, rinfo, i) {
2381                 err = blkfront_setup_indirect(rinfo);
2382                 if (err) {
2383                         xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
2384                                          info->xbdev->otherend);
2385                         blkif_free(info, 0);
2386                         break;
2387                 }
2388         }
2389
2390         err = xlvbd_alloc_gendisk(sectors, info, sector_size,
2391                                   physical_sector_size);
2392         if (err) {
2393                 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
2394                                  info->xbdev->otherend);
2395                 goto fail;
2396         }
2397
2398         xenbus_switch_state(info->xbdev, XenbusStateConnected);
2399
2400         /* Kick pending requests. */
2401         info->connected = BLKIF_STATE_CONNECTED;
2402         for_each_rinfo(info, rinfo, i)
2403                 kick_pending_request_queues(rinfo);
2404
2405         err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
2406         if (err) {
2407                 blk_cleanup_disk(info->gd);
2408                 blk_mq_free_tag_set(&info->tag_set);
2409                 info->rq = NULL;
2410                 goto fail;
2411         }
2412
2413         info->is_ready = 1;
2414         return;
2415
2416 fail:
2417         blkif_free(info, 0);
2418         return;
2419 }
2420
2421 /*
2422  * Callback received when the backend's state changes.
2423  */
2424 static void blkback_changed(struct xenbus_device *dev,
2425                             enum xenbus_state backend_state)
2426 {
2427         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2428
2429         dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
2430
2431         switch (backend_state) {
2432         case XenbusStateInitWait:
2433                 if (dev->state != XenbusStateInitialising)
2434                         break;
2435                 if (talk_to_blkback(dev, info))
2436                         break;
2437                 break;
2438         case XenbusStateInitialising:
2439         case XenbusStateInitialised:
2440         case XenbusStateReconfiguring:
2441         case XenbusStateReconfigured:
2442         case XenbusStateUnknown:
2443                 break;
2444
2445         case XenbusStateConnected:
2446                 /*
2447                  * talk_to_blkback sets state to XenbusStateInitialised
2448                  * and blkfront_connect sets it to XenbusStateConnected
2449                  * (if connection went OK).
2450                  *
2451                  * If the backend (or toolstack) decides to poke at backend
2452                  * state (and re-trigger the watch by setting the state repeatedly
2453                  * to XenbusStateConnected (4)) we need to deal with this.
2454                  * This is allowed, as it is how the backend communicates to
2455                  * the guest that the size of the disk has changed.
2456                  */
2457                 if ((dev->state != XenbusStateInitialised) &&
2458                     (dev->state != XenbusStateConnected)) {
2459                         if (talk_to_blkback(dev, info))
2460                                 break;
2461                 }
2462
2463                 blkfront_connect(info);
2464                 break;
2465
2466         case XenbusStateClosed:
2467                 if (dev->state == XenbusStateClosed)
2468                         break;
2469                 fallthrough;
2470         case XenbusStateClosing:
2471                 blkfront_closing(info);
2472                 break;
2473         }
2474 }
2475
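/*
 * Tear down the gendisk, tag set and remaining frontend state when the
 * device is removed.
 */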
2476 static int blkfront_remove(struct xenbus_device *xbdev)
2477 {
2478         struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
2479
2480         dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
2481
2482         del_gendisk(info->gd);
2483
2484         mutex_lock(&blkfront_mutex);
2485         list_del(&info->info_list);
2486         mutex_unlock(&blkfront_mutex);
2487
2488         blkif_free(info, 0);
2489         xlbd_release_minors(info->gd->first_minor, info->gd->minors);
2490         blk_cleanup_disk(info->gd);
2491         blk_mq_free_tag_set(&info->tag_set);
2492
2493         kfree(info);
2494         return 0;
2495 }
2496
2497 static int blkfront_is_ready(struct xenbus_device *dev)
2498 {
2499         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2500
2501         return info->is_ready && info->xbdev;
2502 }
2503
2504 static const struct block_device_operations xlvbd_block_fops = {
2506         .owner = THIS_MODULE,
2507         .getgeo = blkif_getgeo,
2508         .ioctl = blkif_ioctl,
2509         .compat_ioctl = blkdev_compat_ptr_ioctl,
2510 };
2511
2512
2513 static const struct xenbus_device_id blkfront_ids[] = {
2514         { "vbd" },
2515         { "" }
2516 };
2517
2518 static struct xenbus_driver blkfront_driver = {
2519         .ids  = blkfront_ids,
2520         .probe = blkfront_probe,
2521         .remove = blkfront_remove,
2522         .resume = blkfront_resume,
2523         .otherend_changed = blkback_changed,
2524         .is_ready = blkfront_is_ready,
2525 };
2526
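/*
 * Walk the grant lists of every ring and revoke persistent grants that the
 * backend has already unmapped, so their references can be reclaimed and
 * reused by future requests.
 */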
2527 static void purge_persistent_grants(struct blkfront_info *info)
2528 {
2529         unsigned int i;
2530         unsigned long flags;
2531         struct blkfront_ring_info *rinfo;
2532
2533         for_each_rinfo(info, rinfo, i) {
2534                 struct grant *gnt_list_entry, *tmp;
2535                 LIST_HEAD(grants);
2536
2537                 spin_lock_irqsave(&rinfo->ring_lock, flags);
2538
2539                 if (rinfo->persistent_gnts_c == 0) {
2540                         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2541                         continue;
2542                 }
2543
2544                 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
2545                                          node) {
2546                         if (gnt_list_entry->gref == GRANT_INVALID_REF ||
2547                             !gnttab_try_end_foreign_access(gnt_list_entry->gref))
2548                                 continue;
2549
2550                         list_del(&gnt_list_entry->node);
2551                         rinfo->persistent_gnts_c--;
2552                         gnt_list_entry->gref = GRANT_INVALID_REF;
2553                         list_add_tail(&gnt_list_entry->node, &grants);
2554                 }
2555
2556                 list_splice_tail(&grants, &rinfo->grants);
2557
2558                 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2559         }
2560 }
2561
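/*
 * Delayed work: periodically purge stale persistent grants on all devices
 * and re-arm itself as long as at least one device has persistent grants
 * enabled.
 */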
2562 static void blkfront_delay_work(struct work_struct *work)
2563 {
2564         struct blkfront_info *info;
2565         bool need_schedule_work = false;
2566
2567         mutex_lock(&blkfront_mutex);
2568
2569         list_for_each_entry(info, &info_list, info_list) {
2570                 if (info->feature_persistent) {
2571                         need_schedule_work = true;
2572                         mutex_lock(&info->mutex);
2573                         purge_persistent_grants(info);
2574                         mutex_unlock(&info->mutex);
2575                 }
2576         }
2577
2578         if (need_schedule_work)
2579                 schedule_delayed_work(&blkfront_work, HZ * 10);
2580
2581         mutex_unlock(&blkfront_mutex);
2582 }
2583
2584 static int __init xlblk_init(void)
2585 {
2586         int ret;
2587         int nr_cpus = num_online_cpus();
2588
2589         if (!xen_domain())
2590                 return -ENODEV;
2591
2592         if (!xen_has_pv_disk_devices())
2593                 return -ENODEV;
2594
2595         if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2596                 pr_warn("xen_blk: can't get major %d with name %s\n",
2597                         XENVBD_MAJOR, DEV_NAME);
2598                 return -ENODEV;
2599         }
2600
2601         if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2602                 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2603
2604         if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
2605                 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2606                         xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
2607                 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
2608         }
2609
2610         if (xen_blkif_max_queues > nr_cpus) {
2611                 pr_info("Invalid max_queues (%d), will use default max: %d.\n",
2612                         xen_blkif_max_queues, nr_cpus);
2613                 xen_blkif_max_queues = nr_cpus;
2614         }
2615
2616         INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
2617
2618         ret = xenbus_register_frontend(&blkfront_driver);
2619         if (ret) {
2620                 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2621                 return ret;
2622         }
2623
2624         return 0;
2625 }
2626 module_init(xlblk_init);
2627
2628
2629 static void __exit xlblk_exit(void)
2630 {
2631         cancel_delayed_work_sync(&blkfront_work);
2632
2633         xenbus_unregister_driver(&blkfront_driver);
2634         unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2635         kfree(minors);
2636 }
2637 module_exit(xlblk_exit);
2638
2639 MODULE_DESCRIPTION("Xen virtual block device frontend");
2640 MODULE_LICENSE("GPL");
2641 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
2642 MODULE_ALIAS("xen:vbd");
2643 MODULE_ALIAS("xenblk");