/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/loop.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/extent_map.h>

#include <asm/uaccess.h>
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);

static int max_part;
static int part_shift;

/*
 * Transfer functions
 */
static int transfer_none(struct loop_device *lo, int cmd,
			 struct page *raw_page, unsigned raw_off,
			 struct page *loop_page, unsigned loop_off,
			 int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;

	if (cmd == READ)
		memcpy(loop_buf, raw_buf, size);
	else
		memcpy(raw_buf, loop_buf, size);

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	cond_resched();
	return 0;
}

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}
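/*
 * Usage sketch for the XOR transfer above (illustrative only; the device
 * path, backing file name and key bytes are made up and error handling is
 * elided).  The transfer is selected entirely from userspace through the
 * LOOP_SET_FD and LOOP_SET_STATUS64 ioctls handled further down:
 *
 *	int lfd = open("/dev/loop0", O_RDWR);
 *	int ffd = open("backing.img", O_RDWR);
 *	struct loop_info64 info;
 *
 *	ioctl(lfd, LOOP_SET_FD, ffd);
 *	memset(&info, 0, sizeof(info));
 *	info.lo_encrypt_type = LO_CRYPT_XOR;
 *	info.lo_encrypt_key_size = 8;
 *	memcpy(info.lo_encrypt_key, "xorkey42", 8);
 *	ioctl(lfd, LOOP_SET_STATUS64, &info);
 */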
static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
	.transfer = transfer_none,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	loff_t size, offset, loopsize;

	/* Compute loopsize in bytes */
	size = i_size_read(file->f_mapping->host);
	offset = lo->lo_offset;
	loopsize = size - offset;
	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
		loopsize = lo->lo_sizelimit;

	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static int
figure_loop_size(struct loop_device *lo)
{
	loff_t size = get_loop_size(lo, lo->lo_backing_file);
	sector_t x = (sector_t)size;

	if (unlikely((loff_t)x != size))
		return -EFBIG;

	set_capacity(lo->lo_disk, x);
	return 0;
}
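/*
 * Worked example of the sector math above: a 1 GiB backing file
 * (1073741824 bytes) with lo_offset = 4096 and no lo_sizelimit yields
 * loopsize = 1073737728 bytes, i.e. loopsize >> 9 = 2097144 sectors of
 * 512 bytes, which fits easily even in a 32-bit sector_t.
 * figure_loop_size() only fails with -EFBIG when that sector count
 * overflows sector_t.
 */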
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	if (unlikely(!lo->transfer))
		return 0;

	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
}
/**
 * do_lo_send_aops - helper for writing data to a loop device
 *
 * This is the fast version for backing filesystems which implement the
 * address space operations write_begin and write_end.
 */
static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *unused)
{
	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
	struct address_space *mapping = file->f_mapping;
	pgoff_t index;
	unsigned offset, bv_offs;
	int len, ret;

	mutex_lock(&mapping->host->i_mutex);
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
	bv_offs = bvec->bv_offset;
	len = bvec->bv_len;
	while (len > 0) {
		sector_t IV;
		unsigned size, copied;
		int transfer_result;
		struct page *page;
		void *fsdata;

		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
		size = PAGE_CACHE_SIZE - offset;
		if (size > len)
			size = len;

		ret = pagecache_write_begin(file, mapping, pos, size, 0,
							&page, &fsdata);
		if (ret)
			goto fail;

		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
				bvec->bv_page, bv_offs, size, IV);
		copied = size;
		if (unlikely(transfer_result))
			copied = 0;

		ret = pagecache_write_end(file, mapping, pos, size, copied,
							page, fsdata);
		if (ret < 0 || ret != copied)
			goto fail;

		if (unlikely(transfer_result))
			goto fail;

		bv_offs += copied;
		len -= copied;
		offset = 0;
		index++;
		pos += copied;
	}
	ret = 0;
out:
	mutex_unlock(&mapping->host->i_mutex);
	return ret;
fail:
	ret = -1;
	goto out;
}
/**
 * __do_lo_send_write - helper for writing data to a loop device
 *
 * This helper just factors out common code between do_lo_send_direct_write()
 * and do_lo_send_write().
 */
static int __do_lo_send_write(struct file *file,
		u8 *buf, const int len, loff_t pos)
{
	ssize_t bw;
	mm_segment_t old_fs = get_fs();

	set_fs(get_ds());
	bw = file->f_op->write(file, buf, len, &pos);
	set_fs(old_fs);
	if (likely(bw == len))
		return 0;
	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
			(unsigned long long)pos, len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

/**
 * do_lo_send_direct_write - helper for writing data to a loop device
 *
 * This is the fast, non-transforming version for backing filesystems which do
 * not implement the address space operations write_begin and write_end.
 * It uses the write file operation which should be present on all writeable
 * filesystems.
 */
static int do_lo_send_direct_write(struct loop_device *lo,
		struct bio_vec *bvec, loff_t pos, struct page *page)
{
	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
			kmap(bvec->bv_page) + bvec->bv_offset,
			bvec->bv_len, pos);
	kunmap(bvec->bv_page);
	cond_resched();
	return bw;
}

/**
 * do_lo_send_write - helper for writing data to a loop device
 *
 * This is the slow, transforming version for filesystems which do not
 * implement the address space operations write_begin and write_end.  It
 * uses the write file operation which should be present on all writeable
 * filesystems.
 *
 * Using fops->write is slower than using aops->{prepare,commit}_write in the
 * transforming case because we need to double buffer the data as we cannot do
 * the transformations in place as we do not have direct access to the
 * destination pages of the backing file.
 */
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *page)
{
	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
			bvec->bv_offset, bvec->bv_len, pos >> 9);
	if (likely(!ret))
		return __do_lo_send_write(lo->lo_backing_file,
				page_address(page), bvec->bv_len,
				pos);
	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
	if (ret > 0)
		ret = -EIO;
	return ret;
}
static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
			struct page *page);
	struct bio_vec *bvec;
	struct page *page = NULL;
	int i, ret = 0;

	do_lo_send = do_lo_send_aops;
	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
		do_lo_send = do_lo_send_direct_write;
		if (lo->transfer != transfer_none) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page))
				goto fail;
			kmap(page);
			do_lo_send = do_lo_send_write;
		}
	}
	bio_for_each_segment(bvec, bio, i) {
		ret = do_lo_send(lo, bvec, pos, page);
		if (ret < 0)
			break;
		pos += bvec->bv_len;
	}
	if (page) {
		kunmap(page);
		__free_page(page);
	}
out:
	return ret;
fail:
	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
	ret = -ENOMEM;
	goto out;
}
struct lo_read_data {
	struct loop_device *lo;
	struct page *page;
	unsigned offset;
	int bsize;
};

static int
lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		struct splice_desc *sd)
{
	struct lo_read_data *p = sd->u.data;
	struct loop_device *lo = p->lo;
	struct page *page = buf->page;
	sector_t IV;
	int size, ret;

	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
							(buf->offset >> 9);
	size = sd->len;
	if (size > p->bsize)
		size = p->bsize;

	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
		printk(KERN_ERR "loop: transfer error block %ld\n",
		       page->index);
		size = -EINVAL;
	}

	flush_dcache_page(p->page);

	if (size > 0)
		p->offset += size;

	return size;
}

static int
lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, lo_splice_actor);
}

static int
do_lo_receive(struct loop_device *lo,
	      struct bio_vec *bvec, int bsize, loff_t pos)
{
	struct lo_read_data cookie;
	struct splice_desc sd;
	struct file *file;
	long retval;

	cookie.lo = lo;
	cookie.page = bvec->bv_page;
	cookie.offset = bvec->bv_offset;
	cookie.bsize = bsize;

	sd.len = 0;
	sd.total_len = bvec->bv_len;
	sd.flags = 0;
	sd.pos = pos;
	sd.u.data = &cookie;

	file = lo->lo_backing_file;
	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);

	if (retval < 0)
		return retval;

	return 0;
}

static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
	struct bio_vec *bvec;
	int i, ret = 0;

	bio_for_each_segment(bvec, bio, i) {
		ret = do_lo_receive(lo, bvec, bsize, pos);
		if (ret < 0)
			break;
		pos += bvec->bv_len;
	}
	return ret;
}
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
{
	loff_t pos;
	int ret;

	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
	if (bio_rw(bio) == WRITE)
		ret = lo_send(lo, bio, pos);
	else
		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
	return ret;
}
/*
 * Sleep until "condition" holds, dropping the lock and kicking the worker
 * thread while waiting.  Note that the macro relies on an in-scope "lo"
 * for the wakeup, exactly as the callers below provide.
 */
#define __lo_throttle(wq, lock, condition)				\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait((wq), &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spin_unlock_irq((lock));				\
		wake_up(&lo->lo_event);					\
		schedule();						\
		spin_lock_irq((lock));					\
	}								\
	finish_wait((wq), &__wait);					\
} while (0)

#define LO_BIO_THROTTLE		128
#define LO_BIO_THROTTLE_LOW	(LO_BIO_THROTTLE / 2)

/*
 * A normal block device will throttle on request allocation. Do the same
 * for loop to prevent millions of bio's queued internally.
 */
static void loop_bio_throttle(struct loop_device *lo, struct bio *bio)
{
	__lo_throttle(&lo->lo_bio_wait, &lo->lo_lock,
				lo->lo_bio_cnt < LO_BIO_THROTTLE);
}
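/*
 * Example of the hysteresis this creates: with LO_BIO_THROTTLE at 128 and
 * LO_BIO_THROTTLE_LOW at 64, a submitter sleeps once 128 bios are pending
 * and is not woken until the worker thread has drained the count back
 * below 64 (or emptied the list), so producers wake up in batches instead
 * of once per completed bio.
 */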
static void loop_bio_timer(unsigned long data)
{
	struct loop_device *lo = (struct loop_device *) data;

	wake_up(&lo->lo_event);
}

/*
 * Add bio to back of pending list and wake up the thread
 */
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
	loop_bio_throttle(lo, bio);

	if (lo->lo_biotail) {
		lo->lo_biotail->bi_next = bio;
		lo->lo_biotail = bio;
	} else
		lo->lo_bio = lo->lo_biotail = bio;

	lo->lo_bio_cnt++;

	/*
	 * Batch queued IO a little: wake the thread at once when enough is
	 * pending, otherwise arm a one-jiffy timer.
	 */
	if (lo->lo_bio_cnt > 8) {
		if (timer_pending(&lo->lo_bio_timer))
			del_timer(&lo->lo_bio_timer);

		if (waitqueue_active(&lo->lo_event))
			wake_up(&lo->lo_event);
	} else if (!timer_pending(&lo->lo_bio_timer)) {
		lo->lo_bio_timer.expires = jiffies + 1;
		add_timer(&lo->lo_bio_timer);
	}
}

/*
 * Grab first pending buffer
 */
static struct bio *loop_get_bio(struct loop_device *lo)
{
	struct bio *bio;

	if ((bio = lo->lo_bio)) {
		if (bio == lo->lo_biotail)
			lo->lo_biotail = NULL;
		lo->lo_bio = bio->bi_next;
		bio->bi_next = NULL;
	}

	return bio;
}
static void loop_exit_fastfs(struct loop_device *lo)
{
	struct inode *inode = lo->lo_backing_file->f_mapping->host;

	/*
	 * drop what page cache we instantiated filling holes
	 */
	invalidate_inode_pages2(lo->lo_backing_file->f_mapping);

	blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_NONE, NULL);

	mutex_lock(&inode->i_mutex);
	inode->i_flags &= ~S_SWAPFILE;
	mutex_unlock(&inode->i_mutex);
}

static inline u64 lo_bio_offset(struct loop_device *lo, struct bio *bio)
{
	return (u64)lo->lo_offset + ((u64)bio->bi_sector << 9);
}
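/*
 * Example: with lo_offset = 4096, a bio for sector 2048 of the loop device
 * maps to byte 4096 + (2048 << 9) = 1052672 of the backing file.
 */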
/*
 * Find extent mapping this lo device block to the file block on the real
 * device
 */
static struct extent_map *loop_lookup_extent(struct loop_device *lo,
					     u64 offset, gfp_t gfp_mask)
{
	struct address_space *mapping = lo->lo_backing_file->f_mapping;
	u64 len = 1 << lo->blkbits;

	return mapping->a_ops->map_extent(mapping, NULL, 0, offset, len, 0,
						gfp_mask);
}

static void end_bio_hole_filling(struct bio *bio, int err)
{
	struct address_space *mapping = bio->bi_bdev->bd_inode->i_mapping;
	struct bio *orig_bio = bio->bi_private;

	if (mapping->a_ops->extent_io_complete) {
		u64 start = orig_bio->bi_sector << 9;
		u64 len = bio->bi_size;

		mapping->a_ops->extent_io_complete(mapping, start, len);
	}

	bio_put(bio);
	bio_endio(orig_bio, err);
}
static void fill_extent_hole(struct loop_device *lo, struct bio *bio)
{
	struct address_space *mapping = lo->lo_backing_file->f_mapping;
	struct bio *new_bio;
	struct extent_map *em;
	u64 len = bio->bi_size;
	u64 start = lo_bio_offset(lo, bio);
	u64 disk_block;
	u64 extent_off;

	/*
	 * change the sector so we can find the correct file offset in our
	 * endio
	 */
	bio->bi_sector = start >> 9;

	mutex_lock(&mapping->host->i_mutex);

	em = mapping->a_ops->map_extent(mapping, NULL, 0,
					start, len, 1, GFP_KERNEL);
	mark_inode_dirty(mapping->host);
	mutex_unlock(&mapping->host->i_mutex);

	if (em && !IS_ERR(em)) {
		disk_block = em->block_start;
		extent_off = start - em->start;

		/*
		 * bio_clone() is mempool backed, so if __GFP_WAIT is set
		 * it won't ever fail
		 */
		new_bio = bio_clone(bio, GFP_NOIO);
		new_bio->bi_sector = (disk_block + extent_off) >> 9;
		new_bio->bi_bdev = em->bdev;
		new_bio->bi_private = bio;
		new_bio->bi_size = bio->bi_size;
		new_bio->bi_end_io = end_bio_hole_filling;
		free_extent_map(em);

		generic_make_request(new_bio);
	} else
		bio_endio(bio, -EIO);
}

static void loop_bio_destructor(struct bio *bio)
{
	struct completion *c = (struct completion *) bio->bi_flags;

	complete(c);
}
/*
 * Alloc a hint bio to tell the loop thread to read file blocks for a given
 * range
 */
static void loop_schedule_extent_mapping(struct loop_device *lo,
					 struct bio *old_bio)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	struct bio *bio, stackbio;

	bio_init(&stackbio);

	/* punt to the on-stack bio if we cannot allocate one */
	bio = bio_alloc(GFP_ATOMIC, 0);
	if (!bio)
		bio = &stackbio;

	bio->bi_destructor = loop_bio_destructor;
	bio->bi_flags = (unsigned long) &comp;
	bio->bi_rw = LOOP_EXTENT_RW_MAGIC;
	bio->bi_private = old_bio;

	loop_add_bio(lo, bio);

	spin_unlock_irq(&lo->lo_lock);
	wait_for_completion(&comp);
	spin_lock_irq(&lo->lo_lock);
}
static void loop_handle_extent_hole(struct loop_device *lo, struct bio *bio,
				    int sync)
{
	/*
	 * for a read, just zero the data and end the io
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio_vec *bvec;
		unsigned long flags;
		int i;

		bio_for_each_segment(bvec, bio, i) {
			char *dst = bvec_kmap_irq(bvec, &flags);

			memset(dst, 0, bvec->bv_len);
			bvec_kunmap_irq(dst, &flags);
		}
		bio_endio(bio, 0);
	} else {
		/*
		 * let the page cache handling path do this bio, and then
		 * lookup the mapped blocks after the io has been issued to
		 * instantiate extents.
		 */
		if (!sync)
			loop_add_bio(lo, bio);
		else
			fill_extent_hole(lo, bio);
	}
}

static inline int lo_is_switch_bio(struct bio *bio)
{
	return !bio->bi_bdev && bio->bi_rw == LOOP_SWITCH_RW_MAGIC;
}

static inline int lo_is_map_bio(struct bio *bio)
{
	return !bio->bi_bdev && bio->bi_rw == LOOP_EXTENT_RW_MAGIC;
}
static int __loop_redirect_bio(struct loop_device *lo, struct extent_map *em,
			       struct bio *bio, int sync)
{
	u64 disk_block;
	u64 extent_off;

	if (em->block_start == EXTENT_MAP_HOLE) {
		loop_handle_extent_hole(lo, bio, sync);
		return 0;
	}

	/*
	 * not a hole, redirect
	 */
	disk_block = em->block_start;
	extent_off = lo_bio_offset(lo, bio) - em->start;
	bio->bi_bdev = em->bdev;
	bio->bi_sector = (disk_block + extent_off) >> 9;
	return 1;
}

/*
 * Change mapping of the bio, so that it points to the real bdev and offset
 */
static int loop_redirect_bio(struct loop_device *lo, struct bio *bio)
{
	u64 start = lo_bio_offset(lo, bio);
	struct extent_map *em;

	em = loop_lookup_extent(lo, start, GFP_ATOMIC);
	if (IS_ERR(em)) {
		bio_endio(bio, PTR_ERR(em));
		return 0;
	}
	if (!em) {
		/* must do the lookup from process context */
		loop_schedule_extent_mapping(lo, bio);
		return 0;
	}

	return __loop_redirect_bio(lo, em, bio, 0);
}

/*
 * Wait on bio's on our list to complete before sending a barrier bio
 * to the below device. Called with lo_lock held.
 */
static void loop_wait_on_bios(struct loop_device *lo)
{
	__lo_throttle(&lo->lo_bio_wait, &lo->lo_lock, !lo->lo_bio);
}

static void loop_wait_on_switch(struct loop_device *lo)
{
	__lo_throttle(&lo->lo_bio_wait, &lo->lo_lock, !lo->lo_switch);
}
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
	struct loop_device *lo = q->queuedata;
	int rw = bio_rw(old_bio);

	if (rw == READA)
		rw = READ;

	BUG_ON(!lo || (rw != READ && rw != WRITE));

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_state != Lo_bound)
		goto out;
	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
		goto out;
	if (lo->lo_flags & LO_FLAGS_FASTFS) {
		/*
		 * If we get a barrier bio, then we just need to wait for
		 * existing bio's to be complete. This can only happen
		 * on the 'new' extent mapped loop, since that is the only
		 * one that supports barriers.
		 */
		if (bio_barrier(old_bio))
			loop_wait_on_bios(lo);

		/*
		 * if file switch is in progress, wait for it to complete
		 */
		if (!lo_is_switch_bio(old_bio) && lo->lo_switch)
			loop_wait_on_switch(lo);

		if (loop_redirect_bio(lo, old_bio))
			goto out_redir;
		goto out_end;
	}
	loop_add_bio(lo, old_bio);
	spin_unlock_irq(&lo->lo_lock);
	return 0;

out:
	bio_io_error(old_bio);
out_end:
	spin_unlock_irq(&lo->lo_lock);
	return 0;

out_redir:
	spin_unlock_irq(&lo->lo_lock);
	generic_make_request(old_bio);
	return 0;
}
/*
 * kick off io on the underlying address space
 */
static void loop_unplug(struct request_queue *q)
{
	struct loop_device *lo = q->queuedata;

	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
	blk_run_address_space(lo->lo_backing_file->f_mapping);
}

static void loop_unplug_fastfs(struct request_queue *q)
{
	struct loop_device *lo = q->queuedata;
	struct request_queue *rq = bdev_get_queue(lo->fs_bdev);
	unsigned long flags;

	local_irq_save(flags);

	if (blk_remove_plug(q) && rq->unplug_fn)
		rq->unplug_fn(rq);

	local_irq_restore(flags);
}

struct switch_request {
	struct file *file;
	struct completion wait;
};

static void do_loop_switch(struct loop_device *, struct switch_request *);
static int loop_init_fastfs(struct loop_device *);
static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
{
	if (lo_is_map_bio(bio)) {
		struct bio *org_bio = bio->bi_private;
		struct extent_map *em;

		em = loop_lookup_extent(lo, lo_bio_offset(lo, org_bio),
						GFP_KERNEL);
		if (!em || IS_ERR(em))
			bio_endio(org_bio, -EIO);
		else if (__loop_redirect_bio(lo, em, org_bio, 1))
			generic_make_request(org_bio);

		/* completes the waiter through loop_bio_destructor() */
		bio_put(bio);
	} else if (lo_is_switch_bio(bio)) {
		do_loop_switch(lo, bio->bi_private);
		bio_put(bio);
	} else {
		if (lo->lo_flags & LO_FLAGS_FASTFS) {
			/* we only get here when filling holes */
			fill_extent_hole(lo, bio);
		} else {
			int ret = do_bio_filebacked(lo, bio);

			bio_endio(bio, ret);
		}
	}
}
/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn. it also does loop decrypting
 * on reads for block backed loop, as that is too heavy to do from
 * b_end_io context where irqs may be disabled.
 *
 * Loop explanation:  loop_clr_fd() sets lo_state to Lo_rundown before
 * calling kthread_stop().  Therefore once kthread_should_stop() is
 * true, make_request will not place any more requests.  Therefore
 * once kthread_should_stop() is true and lo_bio is NULL, we are
 * done with the loop.
 */
static int loop_thread(void *data)
{
	struct loop_device *lo = data;
	struct bio *bio;

	set_user_nice(current, -20);

	while (!kthread_should_stop() || lo->lo_bio) {

		wait_event_interruptible(lo->lo_event,
				lo->lo_bio || kthread_should_stop());

		if (!lo->lo_bio)
			continue;
		spin_lock_irq(&lo->lo_lock);
		bio = loop_get_bio(lo);
		spin_unlock_irq(&lo->lo_lock);

		BUG_ON(!bio);
		loop_handle_bio(lo, bio);

		spin_lock_irq(&lo->lo_lock);
		if (--lo->lo_bio_cnt < LO_BIO_THROTTLE_LOW || !lo->lo_bio)
			wake_up(&lo->lo_bio_wait);
		spin_unlock_irq(&lo->lo_lock);
	}

	return 0;
}
/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO, it does this by sending a magic
 * BIO down the pipe. The completion of this BIO does the actual switch.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	if (!bio)
		return -ENOMEM;
	init_completion(&w.wait);
	w.file = file;
	bio->bi_private = &w;
	bio->bi_bdev = NULL;
	bio->bi_rw = LOOP_SWITCH_RW_MAGIC;
	lo->lo_switch = 1;
	loop_make_request(lo->lo_queue, bio);
	wait_for_completion(&w.wait);
	return 0;
}
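/*
 * The magic-bio handshake above, in sequence: loop_switch() queues a
 * zero-sized bio tagged LOOP_SWITCH_RW_MAGIC through the normal
 * make_request path, so it drains behind all previously queued bios; the
 * worker thread recognizes it in loop_handle_bio(), performs
 * do_loop_switch() and completes w.wait, at which point the caller knows
 * every earlier bio has been handled.
 */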
/*
 * Helper to flush the IOs in loop, but keeping loop thread running
 */
static int loop_flush(struct loop_device *lo)
{
	/* loop not yet configured, no running thread, nothing to flush */
	if (!lo->lo_thread)
		return 0;

	return loop_switch(lo, NULL);
}
/*
 * Do the actual switch; called from the BIO completion routine
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;
	const int fastfs = lo->lo_flags & LO_FLAGS_FASTFS;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		goto out;

	if (fastfs)
		loop_exit_fastfs(lo);

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (fastfs)
		loop_init_fastfs(lo);

out:
	lo->lo_switch = 0;
	wake_up(&lo->lo_bio_wait);
	complete(&p->wait);
}
/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file, *old_file;
	struct inode *inode;
	int error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* new backing store needs to support loop (eg splice_read) */
	if (!inode->i_fop->splice_read)
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

 out_putf:
	fput(file);
 out:
	return error;
}
/*
 * See if adding this bvec would cause us to spill into a new extent. If so,
 * disallow the add to start a new bio. This ensures that the bio we receive
 * in loop_make_request() never spans two extents or more.
 */
static int loop_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm,
			   struct bio_vec *bvec)
{
	struct loop_device *lo = q->queuedata;
	struct extent_map *em;
	unsigned int ret;
	u64 start;
	u64 len;

	if (!(lo->lo_flags & LO_FLAGS_FASTFS))
		return bvec->bv_len;

	start = (u64) lo->lo_offset + ((u64)bvm->bi_sector << 9);
	len = bvm->bi_size + bvec->bv_len;
	ret = bvec->bv_len;

	em = loop_lookup_extent(lo, start, GFP_ATOMIC);
	if (em && !IS_ERR(em)) {
		/*
		 * have extent, disallow if outside that extent
		 */
		if (start + len > em->start + em->len || start < em->start)
			ret = 0;

		free_extent_map(em);
	} else
		ret = 0;

	return ret;
}
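/*
 * Example: suppose the backing file has one contiguous 128 KiB extent and
 * an in-flight bio already ends 4 KiB before that extent's boundary.  A
 * merge attempt that would push bi_size past the boundary makes
 * start + len exceed em->start + em->len, so this returns 0, the block
 * layer starts a fresh bio, and loop_make_request() never sees a bio
 * straddling two extents.
 */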
/*
 * Initialize the members pertaining to extent mapping. We will populate
 * the tree lazily on demand, as a full scan of a big file can take some
 * time.
 */
static int loop_init_fastfs(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *fs_q;
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	/*
	 * Need a working extent_map
	 */
	if (inode->i_mapping->a_ops->map_extent == NULL)
		return -EINVAL;

	/*
	 * invalidate all page cache belonging to this file, it could become
	 * stale when we directly overwrite blocks.
	 */
	ret = invalidate_inode_pages2(file->f_mapping);
	if (unlikely(ret))
		return ret;

	/*
	 * disable truncate on this file
	 */
	mutex_lock(&inode->i_mutex);
	inode->i_flags |= S_SWAPFILE;
	mutex_unlock(&inode->i_mutex);

	lo->blkbits = inode->i_blkbits;
	lo->fs_bdev = file->f_mapping->host->i_sb->s_bdev;
	lo->lo_flags |= LO_FLAGS_FASTFS;
	lo->lo_queue->unplug_fn = loop_unplug_fastfs;

	blk_queue_merge_bvec(lo->lo_queue, loop_merge_bvec);
	blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);

	fs_q = bdev_get_queue(lo->fs_bdev);
	blk_queue_stack_limits(lo->lo_queue, fs_q);

	printk(KERN_INFO "loop%d: fast redirect\n", lo->lo_number);
	return 0;
}
static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)
{
	struct file *file, *f;
	struct inode *inode;
	struct address_space *mapping;
	unsigned lo_blocksize;
	int lo_flags = 0;
	int error;
	loff_t size;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_putf;

	/* Avoid recursion */
	f = file;
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			goto out_putf;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
			error = -EINVAL;
			goto out_putf;
		}
		f = l->lo_backing_file;
	}

	mapping = file->f_mapping;
	inode = mapping->host;

	if (!(file->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	error = -EINVAL;
	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		const struct address_space_operations *aops = mapping->a_ops;
		/*
		 * If we can't read - sorry. If we only can't write - well,
		 * it's going to be read-only.
		 */
		if (!file->f_op->splice_read)
			goto out_putf;
		if (aops->write_begin)
			lo_flags |= LO_FLAGS_USE_AOPS;
		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
			lo_flags |= LO_FLAGS_READ_ONLY;

		lo_blocksize = S_ISBLK(inode->i_mode) ?
			inode->i_bdev->bd_block_size : PAGE_SIZE;

		error = 0;
	} else {
		goto out_putf;
	}

	size = get_loop_size(lo, file);

	if ((loff_t)(sector_t)size != size) {
		error = -EFBIG;
		goto out_putf;
	}

	if (!(mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_blocksize = lo_blocksize;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = transfer_none;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	lo->lo_bio = lo->lo_biotail = NULL;

	/*
	 * set queue make_request_fn, and add limits based on lower level
	 * device
	 */
	blk_queue_make_request(lo->lo_queue, loop_make_request);
	lo->lo_queue->queuedata = lo;
	lo->lo_queue->unplug_fn = loop_unplug;

	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);

	set_blocksize(bdev, lo_blocksize);

	/*
	 * This needs to be done after setup with another ioctl,
	 * not automatically like this.
	 */
	loop_init_fastfs(lo);

	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
						lo->lo_number);
	if (IS_ERR(lo->lo_thread)) {
		error = PTR_ERR(lo->lo_thread);
		goto out_clr;
	}
	lo->lo_state = Lo_bound;
	wake_up_process(lo->lo_thread);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

out_clr:
	lo->lo_thread = NULL;
	lo->lo_device = NULL;
	lo->lo_backing_file = NULL;
	lo->lo_flags = 0;
	set_capacity(lo->lo_disk, 0);
	invalidate_bdev(bdev);
	bd_set_size(bdev, 0);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
	lo->lo_state = Lo_unbound;
out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}
static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}
static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */
		return -EBUSY;

	if (filp == NULL)
		return -EINVAL;

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	spin_unlock_irq(&lo->lo_lock);

	kthread_stop(lo->lo_thread);

	if (lo->lo_flags & LO_FLAGS_FASTFS)
		loop_exit_fastfs(lo);

	lo->lo_queue->unplug_fn = NULL;
	lo->lo_backing_file = NULL;

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	lo->lo_flags = 0;
	lo->lo_thread = NULL;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev)
		invalidate_bdev(bdev);
	set_capacity(lo->lo_disk, 0);
	if (bdev)
		bd_set_size(bdev, 0);
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	fput(filp);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;
}
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	uid_t uid = current_uid();

	if (lo->lo_encrypt_key_size &&
	    lo->lo_key_owner != uid &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	err = loop_release_xfer(lo);
	if (err)
		return err;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (lo->lo_flags & LO_FLAGS_FASTFS)
			return -EINVAL;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	/*
	 * for remaps, offset must be a multiple of full blocks
	 */
	if ((lo->lo_flags & LO_FLAGS_FASTFS) &&
	    (((1 << lo->blkbits) - 1) & info->lo_offset))
		return -EINVAL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		return err;

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		lo->lo_offset = info->lo_offset;
		lo->lo_sizelimit = info->lo_sizelimit;
		if (figure_loop_size(lo))
			return -EFBIG;
	}

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	return 0;
}
static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}
static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock(&lo->lo_ctl_mutex);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		err = loop_clr_fd(lo, bdev);
		break;
	case LOOP_SET_STATUS:
		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_FASTFS:
		err = loop_init_fastfs(lo);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);
	return err;
}
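/*
 * Userspace view of the ioctls handled above, as a rough sketch (device
 * and file names are made up, error handling elided).  LOOP_SET_FASTFS
 * must be issued after the backing file is attached, since
 * loop_init_fastfs() inspects lo->lo_backing_file:
 *
 *	int lfd = open("/dev/loop0", O_RDWR);
 *	int ffd = open("/var/images/disk.img", O_RDWR);
 *
 *	ioctl(lfd, LOOP_SET_FD, ffd);		// bind backing file
 *	ioctl(lfd, LOOP_SET_FASTFS, 0);		// opt in to extent redirect
 *	...
 *	ioctl(lfd, LOOP_CLR_FD, 0);		// tear down again
 */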
#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}
/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif
static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo = bdev->bd_disk->private_data;

	mutex_lock(&lo->lo_ctl_mutex);
	lo->lo_refcnt++;
	mutex_unlock(&lo->lo_ctl_mutex);

	return 0;
}

static int lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;

	mutex_lock(&lo->lo_ctl_mutex);

	if (--lo->lo_refcnt)
		goto out;

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		loop_clr_fd(lo, NULL);
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

out:
	mutex_unlock(&lo->lo_ctl_mutex);

	return 0;
}
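/*
 * The autoclear path above means userspace can set LO_FLAGS_AUTOCLEAR via
 * LOOP_SET_STATUS64 and then simply close its descriptors; a rough sketch
 * (other fields zeroed, error handling elided):
 *
 *	struct loop_info64 info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.lo_flags = LO_FLAGS_AUTOCLEAR;
 *	ioctl(lfd, LOOP_SET_STATUS64, &info);
 *	close(lfd);	// last close runs loop_clr_fd(lo, NULL)
 */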
static struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, 0);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}
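/*
 * A transfer module would typically register itself like this from its
 * module init; the example_* names below are hypothetical, only the
 * loop_func_table layout and the LO_CRYPT_* slot come from this file:
 *
 *	static struct loop_func_table example_funcs = {
 *		.number   = LO_CRYPT_CRYPTOAPI,
 *		.transfer = example_transfer,
 *		.init     = example_init,
 *		.release  = example_release,
 *		.owner    = THIS_MODULE,
 *	};
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return loop_register_transfer(&example_funcs);
 *	}
 */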
int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_device *lo;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;

	list_for_each_entry(lo, &loop_devices, lo_list) {
		mutex_lock(&lo->lo_ctl_mutex);

		if (lo->lo_encryption == xfer)
			loop_release_xfer(lo);

		mutex_unlock(&lo->lo_ctl_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
static struct loop_device *loop_alloc(int i)
{
	struct loop_device *lo;
	struct gendisk *disk;

	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;

	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
	if (!lo->lo_queue)
		goto out_free_dev;

	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	mutex_init(&lo->lo_ctl_mutex);
	lo->lo_number = i;
	lo->lo_thread = NULL;
	init_waitqueue_head(&lo->lo_event);
	init_waitqueue_head(&lo->lo_bio_wait);
	setup_timer(&lo->lo_bio_timer, loop_bio_timer, (unsigned long) lo);
	spin_lock_init(&lo->lo_lock);
	disk->major = LOOP_MAJOR;
	disk->first_minor = i << part_shift;
	disk->fops = &lo_fops;
	disk->private_data = lo;
	disk->queue = lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	return lo;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_free_dev:
	kfree(lo);
out:
	return NULL;
}

static void loop_free(struct loop_device *lo)
{
	blk_cleanup_queue(lo->lo_queue);
	put_disk(lo->lo_disk);
	list_del(&lo->lo_list);
	kfree(lo);
}
static struct loop_device *loop_init_one(int i)
{
	struct loop_device *lo;

	list_for_each_entry(lo, &loop_devices, lo_list) {
		if (lo->lo_number == i)
			return lo;
	}

	lo = loop_alloc(i);
	if (lo) {
		add_disk(lo->lo_disk);
		list_add_tail(&lo->lo_list, &loop_devices);
	}
	return lo;
}

static void loop_del_one(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	loop_free(lo);
}

static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;

	mutex_lock(&loop_devices_mutex);
	lo = loop_init_one(dev & MINORMASK);
	kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&loop_devices_mutex);

	*part = 0;
	return kobj;
}
static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo, *next;

	/*
	 * The loop module now has a feature to instantiate the underlying
	 * device structure on-demand, provided that a device node exists to
	 * access it.  However, this will not work well with userspace tools
	 * that don't know about this feature.  In order not to break any
	 * existing tool, we do the following:
	 *
	 * (1) if max_loop is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if max_loop is not specified, create 8 loop devices on module
	 *     load; the user can further extend the set by creating device
	 *     nodes and having the kernel automatically instantiate the
	 *     actual device on-demand.
	 */

	part_shift = 0;
	if (max_part > 0)
		part_shift = fls(max_part);

	if (max_loop > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (max_loop) {
		nr = max_loop;
		range = max_loop;
	} else {
		nr = 8;
		range = 1UL << (MINORBITS - part_shift);
	}

	if (register_blkdev(LOOP_MAJOR, "loop"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		lo = loop_alloc(i);
		if (!lo)
			goto Enomem;
		list_add_tail(&lo->lo_list, &loop_devices);
	}

	/* point of no return */

	list_for_each_entry(lo, &loop_devices, lo_list)
		add_disk(lo->lo_disk);

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

Enomem:
	printk(KERN_INFO "loop: out of memory\n");

	list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
		loop_free(lo);

	unregister_blkdev(LOOP_MAJOR, "loop");
	return -ENOMEM;
}

static void __exit loop_exit(void)
{
	unsigned long range;
	struct loop_device *lo, *next;

	range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);

	list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
		loop_del_one(lo);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");
}

module_init(loop_init);
module_exit(loop_exit);
#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif
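/*
 * Usage examples for the parameters above: built in, the device count is
 * fixed on the kernel command line with e.g. "max_loop=16"; as a module,
 * the equivalent is "modprobe loop max_loop=16 max_part=15", where
 * max_part reserves minor numbers so each loop device can carry
 * partitions (see part_shift in loop_init()).
 */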