/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enabling
 * CBC (and related) modes of encryption, which require a unique IV per data
 * block.  Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non-blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operation write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */
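/*
 * Typical userspace interaction, for illustration only (the ioctls are
 * the ones handled by lo_ioctl() below; losetup(8) wraps them):
 *
 *	int lfd = open("/dev/loop0", O_RDWR);
 *	int ffd = open("backing.img", O_RDWR);
 *	ioctl(lfd, LOOP_SET_FD, ffd);	- bind the backing file
 *	...
 *	ioctl(lfd, LOOP_CLR_FD, 0);	- tear the device down
 *
 * or, equivalently, "losetup /dev/loop0 backing.img".
 */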
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/loop.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>

#include <asm/uaccess.h>
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);

static int max_part;
static int part_shift;
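/*
 * loop_devices holds every allocated loop_device; loop_devices_mutex
 * serializes on-demand instantiation from loop_probe().  max_part is a
 * module parameter (see below); part_shift is derived from it in
 * loop_init() and gives the number of minor-number bits reserved for
 * partitions on each loop device.
 */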
/*
 * Transfer functions
 */
static int transfer_none(struct loop_device *lo, int cmd,
			 struct page *raw_page, unsigned raw_off,
			 struct page *loop_page, unsigned loop_off,
			 int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;

	if (cmd == READ)
		memcpy(loop_buf, raw_buf, size);
	else
		memcpy(raw_buf, loop_buf, size);

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	cond_resched();
	return 0;
}
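/*
 * transfer_xor XORs the data with the user-supplied key.  Note that the
 * key stream restarts at the start of every call and wraps every 512
 * bytes within a call, so identical data at matching offsets produces
 * identical output: this is obfuscation, not serious encryption.
 */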
static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	cond_resched();
	return 0;
}
static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}
static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
	.transfer = transfer_none,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	loff_t size, offset, loopsize;

	/* Compute loopsize in bytes */
	size = i_size_read(file->f_mapping->host);
	offset = lo->lo_offset;
	loopsize = size - offset;
	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
		loopsize = lo->lo_sizelimit;

	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}
static int
figure_loop_size(struct loop_device *lo)
{
	loff_t size = get_loop_size(lo, lo->lo_backing_file);
	sector_t x = (sector_t)size;

	if (unlikely((loff_t)x != size))
		return -EFBIG;

	set_capacity(lo->lo_disk, x);
	return 0;
}
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	if (unlikely(!lo->transfer))
		return 0;

	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
}
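/*
 * The rblock argument is the 512-byte sector number of the data being
 * transferred; transfer functions implementing CBC-style encryption
 * (see the changelog above) can use it as a per-block IV.  The built-in
 * transfer_none and transfer_xor ignore it.
 */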
/**
 * do_lo_send_aops - helper for writing data to a loop device
 *
 * This is the fast version for backing filesystems which implement the address
 * space operations write_begin and write_end.
 */
static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *unused)
{
	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
	struct address_space *mapping = file->f_mapping;
	pgoff_t index;
	unsigned offset, bv_offs;
	int len, ret;

	mutex_lock(&mapping->host->i_mutex);
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
	bv_offs = bvec->bv_offset;
	len = bvec->bv_len;
	while (len > 0) {
		sector_t IV;
		unsigned size, copied;
		int transfer_result;
		struct page *page;
		void *fsdata;

		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
		size = PAGE_CACHE_SIZE - offset;
		if (size > len)
			size = len;

		ret = pagecache_write_begin(file, mapping, pos, size, 0,
							&page, &fsdata);
		if (ret)
			goto fail;

		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
				bvec->bv_page, bv_offs, size, IV);
		copied = size;
		if (unlikely(transfer_result))
			copied = 0;

		ret = pagecache_write_end(file, mapping, pos, size, copied,
							page, fsdata);
		if (ret < 0 || ret != copied)
			goto fail;

		if (unlikely(transfer_result))
			goto fail;

		bv_offs += copied;
		len -= copied;
		offset = 0;
		index++;
		pos += copied;
	}
	ret = 0;
out:
	mutex_unlock(&mapping->host->i_mutex);
	return ret;
fail:
	ret = -1;
	goto out;
}
/**
 * __do_lo_send_write - helper for writing data to a loop device
 *
 * This helper just factors out common code between do_lo_send_direct_write()
 * and do_lo_send_write().
 */
static int __do_lo_send_write(struct file *file,
		u8 *buf, const int len, loff_t pos)
{
	ssize_t bw;
	mm_segment_t old_fs = get_fs();

	set_fs(get_ds());
	bw = file->f_op->write(file, buf, len, &pos);
	set_fs(old_fs);
	if (likely(bw == len))
		return 0;
	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
			(unsigned long long)pos, len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}
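/*
 * Note on the set_fs(get_ds()) dance above: f_op->write() expects a
 * __user pointer, but here the source is a kernel buffer.  Temporarily
 * widening the address limit to KERNEL_DS lets the write path accept
 * the kernel address; the old limit is restored immediately afterwards.
 */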
/**
 * do_lo_send_direct_write - helper for writing data to a loop device
 *
 * This is the fast, non-transforming version for backing filesystems which do
 * not implement the address space operations write_begin and write_end.
 * It uses the write file operation which should be present on all writeable
 * filesystems.
 */
static int do_lo_send_direct_write(struct loop_device *lo,
		struct bio_vec *bvec, loff_t pos, struct page *page)
{
	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
			kmap(bvec->bv_page) + bvec->bv_offset,
			bvec->bv_len, pos);
	kunmap(bvec->bv_page);
	cond_resched();
	return bw;
}
/**
 * do_lo_send_write - helper for writing data to a loop device
 *
 * This is the slow, transforming version for filesystems which do not
 * implement the address space operations write_begin and write_end.  It
 * uses the write file operation which should be present on all writeable
 * filesystems.
 *
 * Using fops->write is slower than using aops->{prepare,commit}_write in the
 * transforming case because we need to double buffer the data: we cannot do
 * the transformations in place, as we do not have direct access to the
 * destination pages of the backing file.
 */
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *page)
{
	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
			bvec->bv_offset, bvec->bv_len, pos >> 9);
	if (likely(!ret))
		return __do_lo_send_write(lo->lo_backing_file,
				page_address(page), bvec->bv_len,
				pos);
	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
	return ret;
}
static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
			struct page *page);
	struct bio_vec *bvec;
	struct page *page = NULL;
	int i, ret = 0;

	do_lo_send = do_lo_send_aops;
	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
		do_lo_send = do_lo_send_direct_write;
		if (lo->transfer != transfer_none) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page))
				goto fail;
			kmap(page);
			do_lo_send = do_lo_send_write;
		}
	}
	bio_for_each_segment(bvec, bio, i) {
		ret = do_lo_send(lo, bvec, pos, page);
		if (ret < 0)
			break;
		pos += bvec->bv_len;
	}
	if (page) {
		kunmap(page);
		__free_page(page);
	}
out:
	return ret;
fail:
	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
	ret = -ENOMEM;
	goto out;
}
struct lo_read_data {
	struct loop_device *lo;
	struct page *page;
	unsigned offset;
	int bsize;
};
static int
lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		struct splice_desc *sd)
{
	struct lo_read_data *p = sd->u.data;
	struct loop_device *lo = p->lo;
	struct page *page = buf->page;
	sector_t IV;
	int size, ret;

	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
							(buf->offset >> 9);
	size = sd->len;
	if (size > p->bsize)
		size = p->bsize;

	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
		printk(KERN_ERR "loop: transfer error block %ld\n",
		       page->index);
		size = -EINVAL;
	}

	flush_dcache_page(p->page);

	if (size > 0)
		p->offset += size;

	return size;
}
static int
lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, lo_splice_actor);
}
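/*
 * Reads are implemented with splice: splice_direct_to_actor() pulls
 * pages of the backing file straight from its page cache and hands each
 * one to lo_splice_actor(), which copies (and optionally decrypts) the
 * data into the bio's pages.
 */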
static int
do_lo_receive(struct loop_device *lo,
	      struct bio_vec *bvec, int bsize, loff_t pos)
{
	struct lo_read_data cookie;
	struct splice_desc sd;
	struct file *file;
	long retval;

	cookie.lo = lo;
	cookie.page = bvec->bv_page;
	cookie.offset = bvec->bv_offset;
	cookie.bsize = bsize;

	sd.len = 0;
	sd.total_len = bvec->bv_len;
	sd.flags = 0;
	sd.pos = pos;
	sd.u.data = &cookie;

	file = lo->lo_backing_file;
	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);

	if (retval < 0)
		return retval;

	return 0;
}
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
	struct bio_vec *bvec;
	int i, ret = 0;

	bio_for_each_segment(bvec, bio, i) {
		ret = do_lo_receive(lo, bvec, bsize, pos);
		if (ret < 0)
			break;
		pos += bvec->bv_len;
	}
	return ret;
}
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
{
	loff_t pos;
	int ret;

	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;

	if (bio_rw(bio) == WRITE) {
		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
		struct file *file = lo->lo_backing_file;

		if (barrier) {
			if (unlikely(!file->f_op->fsync)) {
				ret = -EOPNOTSUPP;
				goto out;
			}

			ret = vfs_fsync(file, file->f_path.dentry, 0);
			if (unlikely(ret)) {
				ret = -EIO;
				goto out;
			}
		}

		ret = lo_send(lo, bio, pos);

		if (barrier && !ret) {
			ret = vfs_fsync(file, file->f_path.dentry, 0);
			if (unlikely(ret))
				ret = -EIO;
		}
	} else
		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);

out:
	return ret;
}
/*
 * Add bio to back of pending list
 */
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
	bio_list_add(&lo->lo_bio_list, bio);
}
/*
 * Grab first pending buffer
 */
static struct bio *loop_get_bio(struct loop_device *lo)
{
	return bio_list_pop(&lo->lo_bio_list);
}
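/*
 * Both helpers above touch lo_bio_list without taking any lock
 * themselves: callers are expected to hold lo->lo_lock, as
 * loop_make_request() and loop_thread() below do.
 */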
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
	struct loop_device *lo = q->queuedata;
	int rw = bio_rw(old_bio);

	if (rw == READA)
		rw = READ;

	BUG_ON(!lo || (rw != READ && rw != WRITE));

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_state != Lo_bound)
		goto out;
	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
		goto out;
	loop_add_bio(lo, old_bio);
	wake_up(&lo->lo_event);
	spin_unlock_irq(&lo->lo_lock);
	return 0;

out:
	spin_unlock_irq(&lo->lo_lock);
	bio_io_error(old_bio);
	return 0;
}
/*
 * kick off io on the underlying address space
 */
static void loop_unplug(struct request_queue *q)
{
	struct loop_device *lo = q->queuedata;

	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
	blk_run_address_space(lo->lo_backing_file->f_mapping);
}
struct switch_request {
	struct file *file;
	struct completion wait;
};

static void do_loop_switch(struct loop_device *, struct switch_request *);
static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
{
	if (unlikely(!bio->bi_bdev)) {
		do_loop_switch(lo, bio->bi_private);
		bio_put(bio);
	} else {
		int ret = do_bio_filebacked(lo, bio);
		bio_endio(bio, ret);
	}
}
/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn. it also does loop decrypting
 * on reads for block backed loop, as that is too heavy to do from
 * b_end_io context where irqs may be disabled.
 *
 * Loop explanation:  loop_clr_fd() sets lo_state to Lo_rundown before
 * calling kthread_stop().  Therefore once kthread_should_stop() is
 * true, make_request will not place any more requests.  Hence, once
 * kthread_should_stop() is true and lo_bio_list is empty, we are
 * done with the loop.
 */
static int loop_thread(void *data)
{
	struct loop_device *lo = data;
	struct bio *bio;

	set_user_nice(current, -20);

	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {

		wait_event_interruptible(lo->lo_event,
				!bio_list_empty(&lo->lo_bio_list) ||
				kthread_should_stop());

		if (bio_list_empty(&lo->lo_bio_list))
			continue;
		spin_lock_irq(&lo->lo_lock);
		bio = loop_get_bio(lo);
		spin_unlock_irq(&lo->lo_lock);

		BUG_ON(!bio);
		loop_handle_bio(lo, bio);
	}

	return 0;
}
/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO; it does this by sending a magic
 * BIO down the pipe. The completion of this BIO does the actual switch.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return -ENOMEM;
	init_completion(&w.wait);
	w.file = file;
	bio->bi_private = &w;
	bio->bi_bdev = NULL;
	loop_make_request(lo->lo_queue, bio);
	wait_for_completion(&w.wait);
	return 0;
}
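/*
 * The "magic" bio is recognised by its NULL bi_bdev: loop_handle_bio()
 * treats any bio without a target block device as a switch request and
 * calls do_loop_switch() on its bi_private payload.  Because the worker
 * thread processes bios in FIFO order, all previously queued I/O has
 * been handled by the time the switch runs.
 */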
/*
 * Helper to flush the IOs in loop, but keeping the loop thread running
 */
static int loop_flush(struct loop_device *lo)
{
	/* loop not yet configured, no running thread, nothing to flush */
	if (!lo->lo_thread)
		return 0;

	return loop_switch(lo, NULL);
}
/*
 * Do the actual switch; called from the BIO completion routine
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		goto out;

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
out:
	complete(&p->wait);
}
/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file	*file, *old_file;
	struct inode	*inode;
	int		error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

 out_putf:
	fput(file);
 out:
	return error;
}
static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)
{
	struct file	*file, *f;
	struct inode	*inode;
	struct address_space *mapping;
	unsigned	lo_blocksize;
	int		lo_flags = 0;
	int		error;
	loff_t		size;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_putf;

	/* Avoid recursion */
	f = file;
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			goto out_putf;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
			error = -EINVAL;
			goto out_putf;
		}
		f = l->lo_backing_file;
	}

	mapping = file->f_mapping;
	inode = mapping->host;

	if (!(file->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	error = -EINVAL;
	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		const struct address_space_operations *aops = mapping->a_ops;

		if (aops->write_begin)
			lo_flags |= LO_FLAGS_USE_AOPS;
		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
			lo_flags |= LO_FLAGS_READ_ONLY;

		lo_blocksize = S_ISBLK(inode->i_mode) ?
			inode->i_bdev->bd_block_size : PAGE_SIZE;

		error = 0;
	} else {
		goto out_putf;
	}

	size = get_loop_size(lo, file);

	if ((loff_t)(sector_t)size != size) {
		error = -EFBIG;
		goto out_putf;
	}

	if (!(mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_blocksize = lo_blocksize;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = transfer_none;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	bio_list_init(&lo->lo_bio_list);

	/*
	 * set queue make_request_fn, and add limits based on lower level
	 * device
	 */
	blk_queue_make_request(lo->lo_queue, loop_make_request);
	lo->lo_queue->queuedata = lo;
	lo->lo_queue->unplug_fn = loop_unplug;

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);

	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
						lo->lo_number);
	if (IS_ERR(lo->lo_thread)) {
		error = PTR_ERR(lo->lo_thread);
		goto out_clr;
	}
	lo->lo_state = Lo_bound;
	wake_up_process(lo->lo_thread);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

out_clr:
	lo->lo_thread = NULL;
	lo->lo_device = NULL;
	lo->lo_backing_file = NULL;
	lo->lo_flags = 0;
	set_capacity(lo->lo_disk, 0);
	invalidate_bdev(bdev);
	bd_set_size(bdev, 0);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
	lo->lo_state = Lo_unbound;
out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}
static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}
static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}
static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */
		return -EBUSY;

	if (filp == NULL)
		return -EINVAL;

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	spin_unlock_irq(&lo->lo_lock);

	kthread_stop(lo->lo_thread);

	lo->lo_queue->unplug_fn = NULL;
	lo->lo_backing_file = NULL;

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	lo->lo_flags = 0;
	lo->lo_thread = NULL;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev)
		invalidate_bdev(bdev);
	set_capacity(lo->lo_disk, 0);
	if (bdev)
		bd_set_size(bdev, 0);
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	if (max_part > 0 && bdev)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
	fput(filp);
	return 0;
}
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	uid_t uid = current_uid();

	if (lo->lo_encrypt_key_size &&
	    lo->lo_key_owner != uid &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	err = loop_release_xfer(lo);
	if (err)
		return err;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		return err;

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		lo->lo_offset = info->lo_offset;
		lo->lo_sizelimit = info->lo_sizelimit;
		if (figure_loop_size(lo))
			return -EFBIG;
	}

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	return 0;
}
static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}
static int
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}
static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}
static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}
static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}
static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}
static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
{
	int err;
	sector_t sec;
	loff_t sz;

	err = -ENXIO;
	if (unlikely(lo->lo_state != Lo_bound))
		goto out;
	err = figure_loop_size(lo);
	if (unlikely(err))
		goto out;
	sec = get_capacity(lo->lo_disk);
	/* the width of sector_t may be narrow for bit-shift */
	sz = sec;
	sz <<= 9;
	mutex_lock(&bdev->bd_mutex);
	bd_set_size(bdev, sz);
	mutex_unlock(&bdev->bd_mutex);

 out:
	return err;
}
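/*
 * LOOP_SET_CAPACITY exists so userspace can grow a live device: after
 * extending the backing file (e.g. with truncate(1)), issuing this
 * ioctl re-reads the file size and propagates it to the gendisk and
 * the block device.
 */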
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo, bdev);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo, bdev);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);

out_unlocked:
	return err;
}
#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};
/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}
/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}
static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
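		/*
		 * Deliberate fall-through: once the pointer argument has
		 * been converted with compat_ptr(), these commands are
		 * handled exactly like the native ioctls below.
		 */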
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif
static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo = bdev->bd_disk->private_data;

	mutex_lock(&lo->lo_ctl_mutex);
	lo->lo_refcnt++;
	mutex_unlock(&lo->lo_ctl_mutex);

	return 0;
}
static int lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	mutex_lock(&lo->lo_ctl_mutex);

	if (--lo->lo_refcnt)
		goto out;

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo, NULL);
		if (!err)
			goto out_unlocked;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

out:
	mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
	return 0;
}
static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};
/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, 0);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
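/*
 * Illustrative usage of the parameters (not part of the driver):
 *
 *	modprobe loop max_loop=16 max_part=15
 *
 * creates loop0..loop15 up front and reserves enough minor numbers so
 * that up to 15 partitions per device (loop0p1, loop0p2, ...) can be
 * scanned via BLKRRPART.
 */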
int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}
int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_device *lo;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;

	list_for_each_entry(lo, &loop_devices, lo_list) {
		mutex_lock(&lo->lo_ctl_mutex);

		if (lo->lo_encryption == xfer)
			loop_release_xfer(lo);

		mutex_unlock(&lo->lo_ctl_mutex);
	}

	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
static struct loop_device *loop_alloc(int i)
{
	struct loop_device *lo;
	struct gendisk *disk;

	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;

	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
	if (!lo->lo_queue)
		goto out_free_dev;

	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	mutex_init(&lo->lo_ctl_mutex);
	lo->lo_number		= i;
	lo->lo_thread		= NULL;
	init_waitqueue_head(&lo->lo_event);
	spin_lock_init(&lo->lo_lock);
	disk->major		= LOOP_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &lo_fops;
	disk->private_data	= lo;
	disk->queue		= lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	return lo;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_free_dev:
	kfree(lo);
out:
	return NULL;
}
static void loop_free(struct loop_device *lo)
{
	blk_cleanup_queue(lo->lo_queue);
	put_disk(lo->lo_disk);
	list_del(&lo->lo_list);
	kfree(lo);
}
static struct loop_device *loop_init_one(int i)
{
	struct loop_device *lo;

	list_for_each_entry(lo, &loop_devices, lo_list) {
		if (lo->lo_number == i)
			return lo;
	}

	lo = loop_alloc(i);
	if (lo) {
		add_disk(lo->lo_disk);
		list_add_tail(&lo->lo_list, &loop_devices);
	}
	return lo;
}
static void loop_del_one(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	loop_free(lo);
}
static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;

	mutex_lock(&loop_devices_mutex);
	lo = loop_init_one(dev & MINORMASK);
	kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&loop_devices_mutex);

	*part = 0;
	return kobj;
}
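/*
 * loop_probe() is the hook registered with blk_register_region() in
 * loop_init(): opening an unclaimed /dev/loopN node lands here, and the
 * device structure is instantiated on demand.  Creating the node, e.g.
 * "mknod /dev/loop42 b 7 42" (illustrative example), followed by an
 * open is therefore enough to bring a new device into existence.
 */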
static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo, *next;

	/*
	 * The loop module now has a feature to instantiate the underlying
	 * device structure on demand, provided that there is an access to
	 * the dev node.  However, this does not work well with user-space
	 * tools that don't know about such a "feature".  In order not to
	 * break any existing tool, we do the following:
	 *
	 * (1) if max_loop is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if max_loop is not specified, create 8 loop devices on module
	 *     load; users can further extend the number of loop devices by
	 *     creating dev nodes themselves and having the kernel
	 *     automatically instantiate the actual device on demand.
	 */

	part_shift = 0;
	if (max_part > 0)
		part_shift = fls(max_part);

	if (max_loop > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (max_loop) {
		nr = max_loop;
		range = max_loop;
	} else {
		nr = 8;
		range = 1UL << (MINORBITS - part_shift);
	}

	if (register_blkdev(LOOP_MAJOR, "loop"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		lo = loop_alloc(i);
		if (!lo)
			goto Enomem;
		list_add_tail(&lo->lo_list, &loop_devices);
	}

	/* point of no return */

	list_for_each_entry(lo, &loop_devices, lo_list)
		add_disk(lo->lo_disk);

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

Enomem:
	printk(KERN_INFO "loop: out of memory\n");

	list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
		loop_free(lo);

	unregister_blkdev(LOOP_MAJOR, "loop");
	return -ENOMEM;
}
static void __exit loop_exit(void)
{
	unsigned long range;
	struct loop_device *lo, *next;

	range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);

	list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
		loop_del_one(lo);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");
}
module_init(loop_init);
module_exit(loop_exit);
#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif