/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to a size equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
        return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
        bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
        bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}

static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}

static inline bool zram_allocated(struct zram *zram, u32 index)
{
        return (zram->table[index].flags >> (ZRAM_FLAG_SHIFT + 1)) ||
                        zram->table[index].handle;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
        return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
        zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}
static inline void zram_set_element(struct zram *zram, u32 index,
                        unsigned long element)
{
        zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
        return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
        return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
                        u32 index, size_t size)
{
        unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

        zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
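
/*
 * Editorial note (not from the original source): each table entry packs
 * the compressed object size into the low ZRAM_FLAG_SHIFT bits of ->flags
 * and keeps the zram_pageflags in the bits above, roughly:
 *
 *      flags = [ ZRAM_LOCK, ZRAM_SAME, ... | obj_size (low bits) ]
 *
 * so zram_set_obj_size() must never be handed a size of
 * BIT(ZRAM_FLAG_SHIFT) or larger, and the flag helpers only touch bits at
 * or above ZRAM_FLAG_SHIFT.
 */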
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return false;
}
#endif

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return false;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return false;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return false;

        /* I/O request is valid */
        return true;
}
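
/*
 * Worked example (editorial): with ZRAM_LOGICAL_BLOCK_SIZE == 4096 and
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8, a 4096-byte request at sector 8
 * passes, the same request at sector 4 fails the alignment check, and any
 * request whose end sector exceeds disksize >> SECTOR_SHIFT fails the
 * bounds check.
 */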
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        *index  += (*offset + bvec->bv_len) / PAGE_SIZE;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}
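
/*
 * Editorial note: this is the standard lockless-maximum pattern.
 * atomic_long_cmpxchg() returns the previous value; if another CPU raised
 * max_used_pages between the read and the cmpxchg, the loop re-reads and
 * retries, so the stat converges to the maximum without taking a lock.
 */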
static inline void zram_fill_page(void *ptr, unsigned long len,
                                        unsigned long value)
{
        WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
        memset_l(ptr, value, len / sizeof(unsigned long));
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
        unsigned int pos;
        unsigned long *page;
        unsigned long val;

        page = (unsigned long *)ptr;
        val = page[0];

        for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
                if (val != page[pos])
                        return false;
        }

        *element = val;

        return true;
}
static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}
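
/*
 * Usage (illustrative, device name assumed): cap the compressed memory
 * with "echo 1G > /sys/block/zram0/mem_limit"; memparse() accepts K/M/G
 * suffixes, and writing 0 removes the limit.
 */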
static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(zram->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}
static ssize_t idle_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        unsigned long index;
        char mode_buf[8];
        ssize_t sz;

        sz = strscpy(mode_buf, buf, sizeof(mode_buf));
        if (sz <= 0)
                return -EINVAL;

        /* ignore trailing newline */
        if (mode_buf[sz - 1] == '\n')
                mode_buf[sz - 1] = 0x00;

        if (strcmp(mode_buf, "all"))
                return -EINVAL;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        for (index = 0; index < nr_pages; index++) {
                zram_slot_lock(zram, index);
                if (!zram_allocated(zram, index))
                        goto next;

                zram_set_flag(zram, index, ZRAM_IDLE);
next:
                zram_slot_unlock(zram, index);
        }

        up_read(&zram->init_lock);

        return len;
}
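
/*
 * Usage (illustrative): "echo all > /sys/block/zram0/idle" marks every
 * allocated slot ZRAM_IDLE; slots accessed afterwards drop the flag again
 * via zram_accessed(), so idle-based policies only see untouched pages.
 */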
#ifdef CONFIG_ZRAM_WRITEBACK
static void reset_bdev(struct zram *zram)
{
        struct block_device *bdev;

        if (!zram->backing_dev)
                return;

        bdev = zram->bdev;
        if (zram->old_block_size)
                set_blocksize(bdev, zram->old_block_size);
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
        /* hope filp_close flushes all outstanding IO */
        filp_close(zram->backing_dev, NULL);
        zram->backing_dev = NULL;
        zram->old_block_size = 0;
        zram->bdev = NULL;
        zram->disk->queue->backing_dev_info->capabilities |=
                                BDI_CAP_SYNCHRONOUS_IO;
        kvfree(zram->bitmap);
        zram->bitmap = NULL;
}
static ssize_t backing_dev_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct file *file = zram->backing_dev;
        char *p;
        ssize_t ret;

        down_read(&zram->init_lock);
        if (!zram->backing_dev) {
                memcpy(buf, "none\n", 5);
                up_read(&zram->init_lock);
                return 5;
        }

        p = file_path(file, buf, PAGE_SIZE - 1);
        if (IS_ERR(p)) {
                ret = PTR_ERR(p);
                goto out;
        }

        ret = strlen(p);
        memmove(buf, p, ret);
        buf[ret++] = '\n';
out:
        up_read(&zram->init_lock);
        return ret;
}
static ssize_t backing_dev_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        char *file_name;
        size_t sz;
        struct file *backing_dev = NULL;
        struct inode *inode;
        struct address_space *mapping;
        unsigned int bitmap_sz, old_block_size = 0;
        unsigned long nr_pages, *bitmap = NULL;
        struct block_device *bdev = NULL;
        int err;
        struct zram *zram = dev_to_zram(dev);

        file_name = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!file_name)
                return -ENOMEM;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Can't setup backing device for initialized device\n");
                err = -EBUSY;
                goto out;
        }

        strlcpy(file_name, buf, PATH_MAX);
        /* ignore trailing newline */
        sz = strlen(file_name);
        if (sz > 0 && file_name[sz - 1] == '\n')
                file_name[sz - 1] = 0x00;

        backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
        if (IS_ERR(backing_dev)) {
                err = PTR_ERR(backing_dev);
                backing_dev = NULL;
                goto out;
        }

        mapping = backing_dev->f_mapping;
        inode = mapping->host;

        /* Only block devices are supported at the moment */
        if (!S_ISBLK(inode->i_mode)) {
                err = -ENOTBLK;
                goto out;
        }

        bdev = bdgrab(I_BDEV(inode));
        err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
        if (err < 0) {
                bdev = NULL;
                goto out;
        }

        nr_pages = i_size_read(inode) >> PAGE_SHIFT;
        bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
        bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
        if (!bitmap) {
                err = -ENOMEM;
                goto out;
        }

        old_block_size = block_size(bdev);
        err = set_blocksize(bdev, PAGE_SIZE);
        if (err)
                goto out;

        reset_bdev(zram);

        zram->old_block_size = old_block_size;
        zram->bdev = bdev;
        zram->backing_dev = backing_dev;
        zram->bitmap = bitmap;
        zram->nr_pages = nr_pages;
        /*
         * With the writeback feature, zram does asynchronous IO, so it is
         * no longer a synchronous device and the synchronous IO flag must
         * be cleared. Otherwise, an upper layer (e.g., swap) could wait
         * for IO completion rather than submit-and-return, which would
         * make the system sluggish. Furthermore, when the IO function
         * returns (e.g., swap_readpage), the upper layer assumes the IO is
         * done and may free the page while the IO is still in flight,
         * causing a use-after-free once the IO really completes.
         */
        zram->disk->queue->backing_dev_info->capabilities &=
                        ~BDI_CAP_SYNCHRONOUS_IO;
        up_write(&zram->init_lock);

        pr_info("setup backing device %s\n", file_name);
        kfree(file_name);

        return len;
out:
        if (bitmap)
                kvfree(bitmap);

        if (bdev)
                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

        if (backing_dev)
                filp_close(backing_dev, NULL);

        up_write(&zram->init_lock);

        kfree(file_name);

        return err;
}
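
/*
 * Usage (illustrative, names assumed): with CONFIG_ZRAM_WRITEBACK enabled,
 * attach a backing block device before initializing the zram device:
 *
 *      echo /dev/sdb1 > /sys/block/zram0/backing_dev
 *      echo 4G > /sys/block/zram0/disksize
 *
 * "cat /sys/block/zram0/backing_dev" then reports the path, or "none".
 */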
static unsigned long alloc_block_bdev(struct zram *zram)
{
        unsigned long blk_idx = 1;
retry:
        /* skip bit 0, so a block index is never confused with a 0 handle */
        blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
        if (blk_idx == zram->nr_pages)
                return 0;

        if (test_and_set_bit(blk_idx, zram->bitmap))
                goto retry;

        return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
        int was_set;

        was_set = test_and_clear_bit(blk_idx, zram->bitmap);
        WARN_ON_ONCE(!was_set);
}
static void zram_page_end_io(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);

        page_endio(page, op_is_write(bio_op(bio)),
                        blk_status_to_errno(bio->bi_status));
        bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent)
{
        struct bio *bio;

        bio = bio_alloc(GFP_ATOMIC, 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
        bio_set_dev(bio, zram->bdev);
        if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
                bio_put(bio);
                return -EIO;
        }

        if (!parent) {
                bio->bi_opf = REQ_OP_READ;
                bio->bi_end_io = zram_page_end_io;
        } else {
                bio->bi_opf = parent->bi_opf;
                bio_chain(bio, parent);
        }

        submit_bio(bio);
        return 1;
}
struct zram_work {
        struct work_struct work;
        struct zram *zram;
        unsigned long entry;
        struct bio *bio;
        struct bio_vec bvec;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
        struct zram_work *zw = container_of(work, struct zram_work, work);
        struct zram *zram = zw->zram;
        unsigned long entry = zw->entry;
        struct bio *bio = zw->bio;
        /* read into the caller's bvec, carried in the work item */
        struct bio_vec bvec = zw->bvec;

        read_from_bdev_async(zram, &bvec, entry, bio);
}

/*
 * The block layer wants at most one ->make_request_fn active at a time,
 * so chaining this IO to the parent IO from the same context would
 * deadlock. To avoid that, punt the read to a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
                                unsigned long entry, struct bio *bio)
{
        struct zram_work work;

        work.bvec = *bvec;
        work.zram = zram;
        work.entry = entry;
        work.bio = bio;

        INIT_WORK_ONSTACK(&work.work, zram_sync_read);
        queue_work(system_unbound_wq, &work.work);
        flush_work(&work.work);
        destroy_work_on_stack(&work.work);

        return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
                                unsigned long entry, struct bio *bio)
{
        WARN_ON(1);
        return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent, bool sync)
{
        if (sync)
                return read_from_bdev_sync(zram, bvec, entry, parent);
        else
                return read_from_bdev_async(zram, bvec, entry, parent);
}
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
                                u32 index, struct bio *parent,
                                unsigned long *pentry)
{
        struct bio *bio;
        unsigned long entry;

        bio = bio_alloc(GFP_ATOMIC, 1);
        if (!bio)
                return -ENOMEM;

        entry = alloc_block_bdev(zram);
        if (!entry) {
                bio_put(bio);
                return -ENOSPC;
        }

        bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
        bio_set_dev(bio, zram->bdev);
        if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
                                        bvec->bv_offset)) {
                bio_put(bio);
                free_block_bdev(zram, entry);
                return -EIO;
        }

        if (!parent) {
                bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
                bio->bi_end_io = zram_page_end_io;
        } else {
                bio->bi_opf = parent->bi_opf;
                bio_chain(bio, parent);
        }

        submit_bio(bio);
        *pentry = entry;

        return 0;
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
                                u32 index, struct bio *parent,
                                unsigned long *pentry)
{
        return -EIO;
}

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent, bool sync)
{
        return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
        zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
        debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
        zram_clear_flag(zram, index, ZRAM_IDLE);
        zram->table[index].ac_time = ktime_get_boottime();
}

static ssize_t read_block_state(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        char *kbuf;
        ssize_t index, written = 0;
        struct zram *zram = file->private_data;
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        struct timespec64 ts;

        kbuf = kvmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                kvfree(kbuf);
                return -EINVAL;
        }

        for (index = *ppos; index < nr_pages; index++) {
                int copied;

                zram_slot_lock(zram, index);
                if (!zram_allocated(zram, index))
                        goto next;

                ts = ktime_to_timespec64(zram->table[index].ac_time);
                copied = snprintf(kbuf + written, count,
                        "%12zd %12lld.%06lu %c%c%c%c\n",
                        index, (s64)ts.tv_sec,
                        ts.tv_nsec / NSEC_PER_USEC,
                        zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
                        zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
                        zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
                        zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

                if (count < copied) {
                        zram_slot_unlock(zram, index);
                        break;
                }

                written += copied;
                count -= copied;
next:
                zram_slot_unlock(zram, index);
                *ppos += 1;
        }

        up_read(&zram->init_lock);
        if (copy_to_user(buf, kbuf, written))
                written = -EFAULT;
        kvfree(kbuf);

        return written;
}

static const struct file_operations proc_zram_block_state_op = {
        .open = simple_open,
        .read = read_block_state,
        .llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
        if (!zram_debugfs_root)
                return;

        zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
                                                zram_debugfs_root);
        debugfs_create_file("block_state", 0400, zram->debugfs_dir,
                                zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
        debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
        zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
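
/*
 * Example block_state output (illustrative values):
 *
 *      296       339874.480383 .wh.
 *      297       339874.480383 s...
 *
 * i.e. slot index, last access time in seconds, and the per-slot flags
 * (s)ame, (w)ritten back, (h)uge, (i)dle, with '.' for a clear flag.
 */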
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layout change
 */
static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        char compressor[ARRAY_SIZE(zram->compressor)];
        size_t sz;

        strlcpy(compressor, buf, sizeof(compressor));
        /* ignore trailing newline */
        sz = strlen(compressor);
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;

        if (!zcomp_available_algorithm(compressor))
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }

        strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
        return len;
}
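
/*
 * Usage (illustrative): reading comp_algorithm lists the available
 * backends with the active one in brackets, e.g. "lzo [lz4]";
 * "echo lz4 > /sys/block/zram0/comp_algorithm" selects one, and must be
 * done before disksize is set.
 */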
static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        zs_compact(zram->mem_pool);
        up_read(&zram->init_lock);

        return len;
}
static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}
static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->mem_pool);
                zs_pool_stats(zram->mem_pool, &pool_stats);
        }

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.same_pages),
                        pool_stats.pages_compacted,
                        (u64)atomic64_read(&zram->stats.huge_pages));
        up_read(&zram->init_lock);

        return ret;
}
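
/*
 * mm_stat columns, left to right (editorial summary): orig_data_size,
 * compr_data_size, mem_used_total, mem_limit, mem_used_max, same_pages,
 * pages_compacted, huge_pages -- the first five are byte values, the last
 * three are page counts.
 */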
static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int version = 1;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "version: %d\n%8llu %8llu\n",
                        version,
                        (u64)atomic64_read(&zram->stats.writestall),
                        (u64)atomic64_read(&zram->stats.miss_free));
        up_read(&zram->init_lock);

        return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++)
                zram_free_page(zram, index);

        zs_destroy_pool(zram->mem_pool);
        vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
        size_t num_pages;

        num_pages = disksize >> PAGE_SHIFT;
        zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
        if (!zram->table)
                return false;

        zram->mem_pool = zs_create_pool(zram->disk->disk_name);
        if (!zram->mem_pool) {
                vfree(zram->table);
                return false;
        }

        if (!huge_class_size)
                huge_class_size = zs_huge_class_size(zram->mem_pool);
        return true;
}
/*
 * To protect concurrent access to the same index entry, the caller must
 * hold that table entry's bit_spinlock, marking the entry as being
 * accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
        zram->table[index].ac_time = 0;
#endif
        if (zram_test_flag(zram, index, ZRAM_IDLE))
                zram_clear_flag(zram, index, ZRAM_IDLE);

        if (zram_test_flag(zram, index, ZRAM_HUGE)) {
                zram_clear_flag(zram, index, ZRAM_HUGE);
                atomic64_dec(&zram->stats.huge_pages);
        }

        if (zram_test_flag(zram, index, ZRAM_WB)) {
                zram_clear_flag(zram, index, ZRAM_WB);
                free_block_bdev(zram, zram_get_element(zram, index));
                goto out;
        }

        /*
         * No memory is allocated for same element filled pages.
         * Simply clear same page flag.
         */
        if (zram_test_flag(zram, index, ZRAM_SAME)) {
                zram_clear_flag(zram, index, ZRAM_SAME);
                atomic64_dec(&zram->stats.same_pages);
                goto out;
        }

        handle = zram_get_handle(zram, index);
        if (!handle)
                return;

        zs_free(zram->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(zram, index),
                        &zram->stats.compr_data_size);
out:
        atomic64_dec(&zram->stats.pages_stored);
        zram_set_handle(zram, index, 0);
        zram_set_obj_size(zram, index, 0);
        WARN_ON_ONCE(zram->table[index].flags & ~(1UL << ZRAM_LOCK));
}
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
                                struct bio *bio, bool partial_io)
{
        int ret;
        unsigned long handle;
        void *src, *dst;
        unsigned int size;

        zram_slot_lock(zram, index);
        if (zram_test_flag(zram, index, ZRAM_WB)) {
                struct bio_vec bvec;

                zram_slot_unlock(zram, index);

                bvec.bv_page = page;
                bvec.bv_len = PAGE_SIZE;
                bvec.bv_offset = 0;
                return read_from_bdev(zram, &bvec,
                                zram_get_element(zram, index),
                                bio, partial_io);
        }

        handle = zram_get_handle(zram, index);
        if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
                unsigned long value;
                void *mem;

                value = handle ? zram_get_element(zram, index) : 0;
                mem = kmap_atomic(page);
                zram_fill_page(mem, PAGE_SIZE, value);
                kunmap_atomic(mem);
                zram_slot_unlock(zram, index);
                return 0;
        }

        size = zram_get_obj_size(zram, index);

        src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
                dst = kmap_atomic(page);
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(dst);
                ret = 0;
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

                dst = kmap_atomic(page);
                ret = zcomp_decompress(zstrm, src, size, dst);
                kunmap_atomic(dst);
                zcomp_stream_put(zram->comp);
        }
        zs_unmap_object(zram->mem_pool, handle);
        zram_slot_unlock(zram, index);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

        return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /* Use a temporary buffer to decompress the page */
                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
        }

        ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
        if (unlikely(ret))
                goto out;

        if (is_partial_io(bvec)) {
                void *dst = kmap_atomic(bvec->bv_page);
                void *src = kmap_atomic(page);

                memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
                kunmap_atomic(src);
                kunmap_atomic(dst);
        }
out:
        if (is_partial_io(bvec))
                __free_page(page);

        return ret;
}
static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
                                u32 index, struct bio *bio)
{
        int ret = 0;
        unsigned long alloced_pages;
        unsigned long handle = 0;
        unsigned int comp_len = 0;
        void *src, *dst, *mem;
        struct zcomp_strm *zstrm;
        struct page *page = bvec->bv_page;
        unsigned long element = 0;
        enum zram_pageflags flags = 0;
        bool allow_wb = true;

        mem = kmap_atomic(page);
        if (page_same_filled(mem, &element)) {
                kunmap_atomic(mem);
                /* Free memory associated with this sector now. */
                flags = ZRAM_SAME;
                atomic64_inc(&zram->stats.same_pages);
                goto out;
        }
        kunmap_atomic(mem);

compress_again:
        zstrm = zcomp_stream_get(zram->comp);
        src = kmap_atomic(page);
        ret = zcomp_compress(zstrm, src, &comp_len);
        kunmap_atomic(src);

        if (unlikely(ret)) {
                zcomp_stream_put(zram->comp);
                pr_err("Compression failed! err=%d\n", ret);
                zs_free(zram->mem_pool, handle);
                return ret;
        }

        if (unlikely(comp_len >= huge_class_size)) {
                comp_len = PAGE_SIZE;
                if (zram->backing_dev && allow_wb) {
                        zcomp_stream_put(zram->comp);
                        ret = write_to_bdev(zram, bvec, index, bio, &element);
                        if (!ret) {
                                flags = ZRAM_WB;
                                ret = 1;
                                goto out;
                        }
                        allow_wb = false;
                        goto compress_again;
                }
        }

        /*
         * handle allocation has 2 paths:
         * a) fast path is executed with preemption disabled (for
         *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
         *    since we can't sleep;
         * b) slow path enables preemption and attempts to allocate
         *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
         *    put per-cpu compression stream and, thus, to re-do
         *    the compression once handle is allocated.
         *
         * if we have a 'non-null' handle here then we are coming
         * from the slow path and handle has already been allocated.
         */
        if (!handle)
                handle = zs_malloc(zram->mem_pool, comp_len,
                                __GFP_KSWAPD_RECLAIM |
                                __GFP_NOWARN |
                                __GFP_HIGHMEM |
                                __GFP_MOVABLE);
        if (!handle) {
                zcomp_stream_put(zram->comp);
                atomic64_inc(&zram->stats.writestall);
                handle = zs_malloc(zram->mem_pool, comp_len,
                                GFP_NOIO | __GFP_HIGHMEM |
                                __GFP_MOVABLE);
                if (handle)
                        goto compress_again;
                return -ENOMEM;
        }

        alloced_pages = zs_get_total_pages(zram->mem_pool);
        update_used_max(zram, alloced_pages);

        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zcomp_stream_put(zram->comp);
                zs_free(zram->mem_pool, handle);
                return -ENOMEM;
        }

        dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

        src = zstrm->buffer;
        if (comp_len == PAGE_SIZE)
                src = kmap_atomic(page);
        memcpy(dst, src, comp_len);
        if (comp_len == PAGE_SIZE)
                kunmap_atomic(src);

        zcomp_stream_put(zram->comp);
        zs_unmap_object(zram->mem_pool, handle);
        atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        zram_slot_lock(zram, index);
        zram_free_page(zram, index);

        if (comp_len == PAGE_SIZE) {
                zram_set_flag(zram, index, ZRAM_HUGE);
                atomic64_inc(&zram->stats.huge_pages);
        }

        if (flags) {
                zram_set_flag(zram, index, flags);
                zram_set_element(zram, index, element);
        } else {
                zram_set_handle(zram, index, handle);
                zram_set_obj_size(zram, index, comp_len);
        }
        zram_slot_unlock(zram, index);

        /* Update stats */
        atomic64_inc(&zram->stats.pages_stored);
        return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page = NULL;
        void *src;
        struct bio_vec vec;

        vec = *bvec;
        if (is_partial_io(bvec)) {
                void *dst;
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;

                ret = __zram_bvec_read(zram, page, index, bio, true);
                if (ret)
                        goto out;

                src = kmap_atomic(bvec->bv_page);
                dst = kmap_atomic(page);
                memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
                kunmap_atomic(dst);
                kunmap_atomic(src);

                vec.bv_page = page;
                vec.bv_len = PAGE_SIZE;
                vec.bv_offset = 0;
        }

        ret = __zram_bvec_write(zram, &vec, index, bio);
out:
        if (is_partial_io(bvec))
                __free_page(page);

        return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;

        /*
         * zram manages data in physical block size units. Because logical
         * block size isn't identical with physical block size on some arch,
         * we could get a discard request pointing to a specific offset
         * within a certain physical block. Although we can handle this
         * request by reading that physical block and decompressing and
         * partially zeroing and re-compressing and then re-storing it, this
         * isn't reasonable because our intent with a discard request is to
         * save memory. So skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                zram_slot_lock(zram, index);
                zram_free_page(zram, index);
                zram_slot_unlock(zram, index);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}
/*
 * Returns an errno on error; otherwise returns 0 or 1.
 * Returns 0 if the IO request was completed synchronously.
 * Returns 1 if the IO request was successfully submitted.
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, unsigned int op, struct bio *bio)
{
        unsigned long start_time = jiffies;
        struct request_queue *q = zram->disk->queue;
        int ret;

        generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (!op_is_write(op)) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                flush_dcache_page(bvec->bv_page);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset, bio);
        }

        generic_end_io_acct(q, op, &zram->disk->part0, start_time);

        zram_slot_lock(zram, index);
        zram_accessed(zram, index);
        zram_slot_unlock(zram, index);

        if (unlikely(ret < 0)) {
                if (!op_is_write(op))
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                        (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
        default:
                break;
        }

        bio_for_each_segment(bvec, bio, iter) {
                struct bio_vec bv = bvec;
                unsigned int unwritten = bvec.bv_len;

                do {
                        bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
                                                        unwritten);
                        if (zram_bvec_rw(zram, &bv, index, offset,
                                        bio_op(bio), bio) < 0)
                                goto out;

                        bv.bv_offset += bv.bv_len;
                        unwritten -= bv.bv_len;

                        update_position(&index, &offset, &bv);
                } while (unwritten);
        }

        bio_endio(bio);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        return BLK_QC_T_NONE;

error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;

        atomic64_inc(&zram->stats.notify_free);
        if (!zram_slot_trylock(zram, index)) {
                atomic64_inc(&zram->stats.miss_free);
                return;
        }

        zram_free_page(zram, index);
        zram_slot_unlock(zram, index);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, unsigned int op)
{
        int offset, ret;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        if (PageTransHuge(page))
                return -ENOTSUPP;
        zram = bdev->bd_disk->private_data;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                ret = -EINVAL;
                goto out;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
out:
        /*
         * If the I/O fails, just return the error (i.e., non-zero) without
         * calling page_endio. That makes the upper rw_page callers (e.g.,
         * swap_readpage, __swap_writepage) resubmit the I/O as a bio
         * request, whose bio->bi_end_io then handles the error (e.g.,
         * SetPageError, set_page_dirty and related work).
         */
        if (unlikely(ret < 0))
                return ret;

        switch (ret) {
        case 0:
                page_endio(page, op_is_write(op), 0);
                break;
        case 1:
                ret = 0;
                break;
        default:
                WARN_ON(1);
        }
        return ret;
}
static void zram_reset_device(struct zram *zram)
{
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        comp = zram->comp;
        disksize = zram->disksize;
        zram->disksize = 0;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* All pending I/O on every CPU is done, so it is safe to free now */
        zram_meta_free(zram, disksize);
        memset(&zram->stats, 0, sizeof(zram->stats));
        zcomp_destroy(comp);
        reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_unlock;
        }

        disksize = PAGE_ALIGN(disksize);
        if (!zram_meta_alloc(zram, disksize)) {
                err = -ENOMEM;
                goto out_unlock;
        }

        comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        revalidate_disk(zram->disk);
        up_write(&zram->init_lock);

        return len;

out_free_meta:
        zram_meta_free(zram, disksize);
out_unlock:
        up_write(&zram->init_lock);
        return err;
}
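
/*
 * Typical setup sequence (illustrative, from userspace):
 *
 *      modprobe zram
 *      echo lz4 > /sys/block/zram0/comp_algorithm
 *      echo 1G > /sys/block/zram0/disksize
 *      mkswap /dev/zram0 && swapon /dev/zram0
 *
 * comp_algorithm must be chosen before disksize: once disksize is set,
 * init_done() is true and this store returns -EBUSY.
 */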
static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        revalidate_disk(zram->disk);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}
static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed to reset so open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}

static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
#endif

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_compact.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_idle.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
        &dev_attr_backing_dev.attr,
#endif
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        &dev_attr_debug_stat.attr,
        NULL,
};

static const struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

static const struct attribute_group *zram_disk_attr_groups[] = {
        &zram_disk_attr_group,
        NULL,
};
/*
 * Allocate and initialize a new zram device. The function returns a
 * '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
        struct zram *zram;
        struct request_queue *queue;
        int ret, device_id;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
        if (!zram)
                return -ENOMEM;

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out_free_dev;
        device_id = ret;

        init_rwsem(&zram->init_lock);

        queue = blk_alloc_queue(GFP_KERNEL);
        if (!queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_idr;
        }

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_err("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);

        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical with physical block size (PAGE_SIZE). But if it
         * is different, we will skip discarding some parts of logical
         * blocks in the part of the request range which isn't aligned to
         * physical block size. So we can't ensure that all discarded
         * logical blocks are zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

        zram->disk->queue->backing_dev_info->capabilities |=
                        (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
        device_add_disk(NULL, zram->disk, zram_disk_attr_groups);

        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

        zram_debugfs_register(zram);
        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;

out_free_queue:
        blk_cleanup_queue(queue);
out_free_idr:
        idr_remove(&zram_index_idr, device_id);
out_free_dev:
        kfree(zram);
        return ret;
}
static int zram_remove(struct zram *zram)
{
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        zram_debugfs_unregister(zram);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        bdput(bdev);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        del_gendisk(zram->disk);
        blk_cleanup_queue(zram->disk->queue);
        put_disk(zram->disk);
        kfree(zram);
        return 0;
}
/* zram-control sysfs attributes */

/*
 * NOTE: hot_add is not the usual read-only sysfs attribute: reading from
 * this file does alter the state of your system -- it creates a new
 * uninitialized zram device and returns that device's device_id (or an
 * error code if it fails to create a new device).
 */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)
{
        int ret;

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        if (ret < 0)
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static CLASS_ATTR_RO(hot_add);

static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct zram *zram;
        int ret, dev_id;

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);
        if (ret)
                return ret;
        if (dev_id < 0)
                return -EINVAL;

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
        if (zram) {
                ret = zram_remove(zram);
                if (!ret)
                        idr_remove(&zram_index_idr, dev_id);
        } else {
                ret = -ENODEV;
        }

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
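
/*
 * Usage (illustrative): "cat /sys/class/zram-control/hot_add" prints the
 * id of a freshly created device (e.g. 4 for /dev/zram4), and
 * "echo 4 > /sys/class/zram-control/hot_remove" removes it again,
 * provided it is not open or claimed.
 */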
static struct attribute *zram_control_class_attrs[] = {
        &class_attr_hot_add.attr,
        &class_attr_hot_remove.attr,
        NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_groups   = zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
        zram_remove(ptr);
        return 0;
}

static void destroy_devices(void)
{
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        zram_debugfs_destroy();
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
        cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
                                      zcomp_cpu_up_prepare, zcomp_cpu_dead);
        if (ret < 0)
                return ret;

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_err("Unable to register zram-control class\n");
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return ret;
        }

        zram_debugfs_create();
        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_err("Unable to get major number\n");
                class_unregister(&zram_control_class);
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return -EBUSY;
        }

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);
                if (ret < 0)
                        goto out_error;
                num_devices--;
        }

        return 0;

out_error:
        destroy_devices();
        return ret;
}

static void __exit zram_exit(void)
{
        destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
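
/*
 * Usage (illustrative): "modprobe zram num_devices=4" pre-creates
 * /dev/zram0 .. /dev/zram3; further devices can be added at runtime via
 * /sys/class/zram-control/hot_add.
 */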
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");