+static void loop_exit_fastfs(struct loop_device *lo)
+{
+ struct inode *inode = lo->lo_backing_file->f_mapping->host;
+
+ /*
+ * drop whatever page cache we instantiated while filling holes
+ */
+ invalidate_inode_pages2(lo->lo_backing_file->f_mapping);
+
+ blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_NONE, NULL);
+
+ mutex_lock(&inode->i_mutex);
+ inode->i_flags &= ~S_SWAPFILE;
+ mutex_unlock(&inode->i_mutex);
+}
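+
+/*
+ * The setup counterpart (not shown in this hunk) is assumed to have
+ * flagged the backing inode S_SWAPFILE so the filesystem won't truncate
+ * or relocate the blocks we map directly; loop_exit_fastfs() above drops
+ * that flag, restores default barrier handling, and sheds the page cache
+ * pages instantiated while filling holes.
+ */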
+
+static inline u64 lo_bio_offset(struct loop_device *lo, struct bio *bio)
+{
+ return (u64)lo->lo_offset + ((u64)bio->bi_sector << 9);
+}
+
+/*
+ * Find the extent that maps this loop device block to the file block on
+ * the real device
+ */
+static struct extent_map *loop_lookup_extent(struct loop_device *lo,
+ u64 offset, gfp_t gfp_mask)
+{
+ struct address_space *mapping = lo->lo_backing_file->f_mapping;
+ u64 len = 1 << lo->blkbits;
+
+ return mapping->a_ops->map_extent(mapping, NULL, 0, offset, len, 0,
+ gfp_mask);
+}
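+
+/*
+ * The ->map_extent() address_space operation is assumed here to take
+ * (mapping, page, page_offset, start, len, create, gfp_mask) and to
+ * return the extent covering [start, start + len): NULL if nothing is
+ * mapped yet, or an ERR_PTR() on failure.  loop_redirect_bio() below
+ * depends on exactly that NULL/ERR_PTR distinction.
+ */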
+
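+/*
+ * Completion handler for a hole-filling clone: let the filesystem know
+ * the range was written so it can finalize the extent, then complete the
+ * original bio.  fill_extent_hole() stored the file offset in
+ * orig_bio->bi_sector, so "start" below is a file offset, not a device
+ * sector.
+ */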
+static void end_bio_hole_filling(struct bio *bio, int err)
+{
+ struct address_space *mapping = bio->bi_bdev->bd_inode->i_mapping;
+ struct bio *orig_bio = bio->bi_private;
+
+ if (mapping->a_ops->extent_io_complete) {
+ u64 start = (u64)orig_bio->bi_sector << 9;
+ u64 len = bio->bi_size;
+
+ mapping->a_ops->extent_io_complete(mapping, start, len);
+ }
+
+ bio_put(bio);
+ bio_endio(orig_bio, err);
+}
+
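+/*
+ * Write into a file hole: allocate blocks by calling ->map_extent() with
+ * create set, under i_mutex, then clone the bio and point the clone
+ * straight at the newly mapped blocks on the real device.
+ */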
+static void fill_extent_hole(struct loop_device *lo, struct bio *bio)
+{
+ struct address_space *mapping = lo->lo_backing_file->f_mapping;
+ struct bio *new_bio;
+ struct extent_map *em;
+ u64 len = bio->bi_size;
+ u64 start = lo_bio_offset(lo, bio);
+ u64 disk_block;
+ u64 extent_off;
+
+ /*
+ * change the sector so we can find the correct file offset in our
+ * endio
+ */
+ bio->bi_sector = start >> 9;
+
+ mutex_lock(&mapping->host->i_mutex);
+
+ em = mapping->a_ops->map_extent(mapping, NULL, 0,
+ start, len, 1, GFP_KERNEL);
+ mark_inode_dirty(mapping->host);
+ mutex_unlock(&mapping->host->i_mutex);
+
+ if (em && !IS_ERR(em)) {
+ disk_block = em->block_start;
+ extent_off = start - em->start;
+
+ /*
+ * bio_clone() is mempool backed, so if __GFP_WAIT is set
+ * it won't ever fail
+ */
+ new_bio = bio_clone(bio, GFP_NOIO);
+ new_bio->bi_sector = (disk_block + extent_off) >> 9;
+ new_bio->bi_bdev = em->bdev;
+ new_bio->bi_private = bio;
+ new_bio->bi_size = bio->bi_size;
+ new_bio->bi_end_io = end_bio_hole_filling;
+ free_extent_map(em);
+
+ generic_make_request(new_bio);
+ } else
+ bio_endio(bio, -EIO);
+}
+
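+/*
+ * Destructor for the stack-allocated fallback bio used by
+ * loop_schedule_extent_mapping(); bi_flags doubles as storage for the
+ * submitter's completion, which fires once the loop thread has dropped
+ * its reference to the bio.
+ */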
+static void loop_bio_destructor(struct bio *bio)
+{
+ struct completion *c = (struct completion *) bio->bi_flags;
+
+ complete(c);
+}
+
+/*
+ * Allocate a hint bio to tell the loop thread to read file blocks for a
+ * given range.  Called with lo_lock held: if the atomic allocation fails,
+ * we fall back to a bio on the stack and wait for the loop thread to
+ * finish with it before unwinding.
+ */
+static void loop_schedule_extent_mapping(struct loop_device *lo,
+ struct bio *old_bio)
+{
+ DECLARE_COMPLETION_ONSTACK(comp);
+ struct bio *bio, stackbio;
+ int do_sync = 0;
+
+ bio = bio_alloc(GFP_ATOMIC, 0);
+ if (!bio) {
+ bio = &stackbio;
+ bio_init(bio);
+ bio->bi_destructor = loop_bio_destructor;
+ bio->bi_flags = (unsigned long) &comp;
+ do_sync = 1;
+ }
+
+ bio->bi_rw = LOOP_EXTENT_RW_MAGIC;
+ bio->bi_private = old_bio;
+
+ loop_add_bio(lo, bio);
+
+ if (do_sync) {
+ spin_unlock_irq(&lo->lo_lock);
+ wait_for_completion(&comp);
+ spin_lock_irq(&lo->lo_lock);
+ }
+}
+
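+/*
+ * A bio landed in a file hole.  Reads are satisfied by zeroing the bio's
+ * pages; writes need blocks allocated first, either via the loop thread
+ * (async) or by filling the hole right here (sync).
+ */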
+static void loop_handle_extent_hole(struct loop_device *lo, struct bio *bio,
+ int sync)
+{
+ /*
+ * for a read, just zero the data and end the io
+ */
+ if (bio_data_dir(bio) == READ) {
+ struct bio_vec *bvec;
+ unsigned long flags;
+ int i;
+
+ bio_for_each_segment(bvec, bio, i) {
+ char *dst = bvec_kmap_irq(bvec, &flags);
+
+ memset(dst, 0, bvec->bv_len);
+ bvec_kunmap_irq(dst, &flags);
+ }
+ bio_endio(bio, 0);
+ } else {
+ /*
+ * let the page cache handling path do this bio, and then
+ * look up the mapped blocks after the io has been issued to
+ * instantiate extents.
+ */
+ if (!sync)
+ loop_add_bio(lo, bio);
+ else
+ fill_extent_hole(lo, bio);
+ }
+}
+
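+/*
+ * Switch and extent-map hints are internal bios with no target device;
+ * the magic bi_rw values tell them apart from real IO on the loop
+ * thread's list.
+ */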
+static inline int lo_is_switch_bio(struct bio *bio)
+{
+ return !bio->bi_bdev && bio->bi_rw == LOOP_SWITCH_RW_MAGIC;
+}
+
+static inline int lo_is_map_bio(struct bio *bio)
+{
+ return !bio->bi_bdev && bio->bi_rw == LOOP_EXTENT_RW_MAGIC;
+}
+
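+/*
+ * Remap @bio according to @em.  Returns 1 if the caller should now
+ * submit the bio to the mapped device, 0 if it was consumed here (the
+ * hole paths complete or requeue it themselves).  The extent_map
+ * reference is dropped either way.
+ */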
+static int __loop_redirect_bio(struct loop_device *lo, struct extent_map *em,
+ struct bio *bio, int sync)
+{
+ u64 extent_off;
+ u64 disk_block;
+
+ /*
+ * handle sparse io
+ */
+ if (em->block_start == EXTENT_MAP_HOLE) {
+ loop_handle_extent_hole(lo, bio, sync);
+ free_extent_map(em);
+ return 0;
+ }
+
+ /*
+ * not a hole, redirect
+ */
+ disk_block = em->block_start;
+ extent_off = lo_bio_offset(lo, bio) - em->start;
+ bio->bi_bdev = em->bdev;
+ bio->bi_sector = (disk_block + extent_off) >> 9;
+ free_extent_map(em);
+ return 1;
+}
+
+/*
+ * Change the mapping of the bio so that it points at the real bdev
+ * and offset
+ */
+static int loop_redirect_bio(struct loop_device *lo, struct bio *bio)
+{
+ u64 start = lo_bio_offset(lo, bio);
+ struct extent_map *em;
+
+ em = loop_lookup_extent(lo, start, GFP_ATOMIC);
+ if (IS_ERR(em)) {
+ bio_endio(bio, PTR_ERR(em));
+ return 0;
+ } else if (!em) {
+ loop_schedule_extent_mapping(lo, bio);
+ return 0;
+ }
+
+ return __loop_redirect_bio(lo, em, bio, 0);
+}
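+
+/*
+ * A sketch (not part of this patch) of how the make_request path might
+ * consume the return value:
+ *
+ *	if (loop_redirect_bio(lo, bio))
+ *		generic_make_request(bio);
+ *
+ * A 0 return means the bio was completed with an error, handled as a
+ * hole, or queued for the loop thread to map.
+ */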
+
+/*
+ * Wait for bios on our list to complete before sending a barrier bio
+ * to the device below.  Called with lo_lock held.
+ */
+static void loop_wait_on_bios(struct loop_device *lo)
+{
+ __lo_throttle(&lo->lo_bio_wait, &lo->lo_lock, !lo->lo_bio);
+}
+
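+/*
+ * Same throttle, but for a pending backing file switch; also called
+ * with lo_lock held.
+ */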
+static void loop_wait_on_switch(struct loop_device *lo)
+{
+ __lo_throttle(&lo->lo_bio_wait, &lo->lo_lock, !lo->lo_switch);
+}
+