/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset, struct bio *bio);

static int zram_slot_trylock(struct zram *zram, u32 index)
        return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);

static void zram_slot_lock(struct zram *zram, u32 index)
        bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);

static void zram_slot_unlock(struct zram *zram, u32 index)
        bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
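/*
 * Usage sketch (illustrative): the table accessors below assume the
 * caller holds the per-slot bit spinlock taken by zram_slot_lock(),
 * e.g.:
 *
 *      zram_slot_lock(zram, index);
 *      if (zram_allocated(zram, index))
 *              zram_set_flag(zram, index, ZRAM_IDLE);
 *      zram_slot_unlock(zram, index);
 */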
static inline bool init_done(struct zram *zram)
        return zram->disksize;

static inline struct zram *dev_to_zram(struct device *dev)
        return (struct zram *)dev_to_disk(dev)->private_data;

static unsigned long zram_get_handle(struct zram *zram, u32 index)
        return zram->table[index].handle;

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
        zram->table[index].handle = handle;

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
        return zram->table[index].flags & BIT(flag);

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
        zram->table[index].flags |= BIT(flag);

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
        zram->table[index].flags &= ~BIT(flag);

static inline void zram_set_element(struct zram *zram, u32 index,
                        unsigned long element)
        zram->table[index].element = element;

static unsigned long zram_get_element(struct zram *zram, u32 index)
        return zram->table[index].element;

static size_t zram_get_obj_size(struct zram *zram, u32 index)
        return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);

static void zram_set_obj_size(struct zram *zram,
                        u32 index, size_t size)
        unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

        zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
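/*
 * Illustration: each slot's 'flags' word is split in two by the two
 * helpers above. The low ZRAM_FLAG_SHIFT bits hold the compressed
 * object size; the zram_pageflags bits live above them:
 *
 *      | zram_pageflags (ZRAM_LOCK, ZRAM_SAME, ...) | object size |
 *       63                           ZRAM_FLAG_SHIFT^            0
 */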
static inline bool zram_allocated(struct zram *zram, u32 index)
        return zram_get_obj_size(zram, index) ||
                        zram_test_flag(zram, index, ZRAM_SAME) ||
                        zram_test_flag(zram, index, ZRAM_WB);

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
        return bvec->bv_len != PAGE_SIZE;
#else
static inline bool is_partial_io(struct bio_vec *bvec)
        return false;
#endif

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))

        /* I/O request is valid */

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
        *index += (*offset + bvec->bv_len) / PAGE_SIZE;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
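/*
 * Worked example (assuming PAGE_SIZE == 4096): with *index == 3,
 * *offset == 3072 and bvec->bv_len == 2048, the segment ends 1024
 * bytes into the next page, so update_position() leaves *index == 4
 * and *offset == 1024.
 */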
static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);
                old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
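/*
 * Note: the loop above implements a lock-free maximum. The usual shape
 * of such a retry loop is (sketch):
 *
 *      do {
 *              cur_max = old_max;
 *              if (pages > cur_max)
 *                      old_max = atomic_long_cmpxchg(
 *                              &zram->stats.max_used_pages, cur_max, pages);
 *      } while (old_max != cur_max);
 *
 * The cmpxchg only installs 'pages' if the stored maximum did not
 * change underneath us; otherwise it returns the new value and the
 * loop retries.
 */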
static inline void zram_fill_page(void *ptr, unsigned long len,
                                        unsigned long value)
        WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
        memset_l(ptr, value, len / sizeof(unsigned long));

static bool page_same_filled(void *ptr, unsigned long *element)
        page = (unsigned long *)ptr;

        for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
                if (val != page[pos])
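/*
 * Example: page_same_filled() detects pages in which every
 * unsigned long equals the first one (an all-zeroes page being the
 * common case). Such pages are recorded as a single value via the
 * ZRAM_SAME flag instead of being compressed and stored.
 */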
static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);
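/*
 * Example usage (illustrative): mem_limit is parsed with memparse(),
 * so K/M/G suffixes work, and a limit of 0 disables the check:
 *
 *      echo 1G > /sys/block/zram0/mem_limit
 *      echo 0 > /sys/block/zram0/mem_limit
 */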
static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(zram->mem_pool));
        up_read(&zram->init_lock);

static ssize_t idle_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;

        sz = strscpy(mode_buf, buf, sizeof(mode_buf));

        /* ignore trailing newline */
        if (mode_buf[sz - 1] == '\n')
                mode_buf[sz - 1] = 0x00;

        if (strcmp(mode_buf, "all"))

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);

        for (index = 0; index < nr_pages; index++) {
                /*
                 * Do not mark a ZRAM_UNDER_WB slot as ZRAM_IDLE, to close a
                 * race. See the comment in writeback_store.
                 */
                zram_slot_lock(zram, index);
                if (zram_allocated(zram, index) &&
                                !zram_test_flag(zram, index, ZRAM_UNDER_WB))
                        zram_set_flag(zram, index, ZRAM_IDLE);
                zram_slot_unlock(zram, index);

        up_read(&zram->init_lock);
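/*
 * Example usage (illustrative): "all" is the only mode accepted above.
 * It marks every allocated slot that is not under writeback as idle:
 *
 *      echo all > /sys/block/zram0/idle
 */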
#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret = -EINVAL;

        if (kstrtoull(buf, 10, &val))

        down_read(&zram->init_lock);
        spin_lock(&zram->wb_limit_lock);
        zram->wb_limit_enable = val;
        spin_unlock(&zram->wb_limit_lock);
        up_read(&zram->init_lock);

static ssize_t writeback_limit_enable_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        spin_lock(&zram->wb_limit_lock);
        val = zram->wb_limit_enable;
        spin_unlock(&zram->wb_limit_lock);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);

static ssize_t writeback_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret = -EINVAL;

        if (kstrtoull(buf, 10, &val))

        down_read(&zram->init_lock);
        spin_lock(&zram->wb_limit_lock);
        zram->bd_wb_limit = val;
        spin_unlock(&zram->wb_limit_lock);
        up_read(&zram->init_lock);

static ssize_t writeback_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        spin_lock(&zram->wb_limit_lock);
        val = zram->bd_wb_limit;
        spin_unlock(&zram->wb_limit_lock);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
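/*
 * Example usage (illustrative): bd_wb_limit is accounted in 4KiB
 * units (see the 1UL << (PAGE_SHIFT - 12) decrement in
 * writeback_store() below), so this caps writeback at ~100MiB:
 *
 *      echo 1 > /sys/block/zram0/writeback_limit_enable
 *      echo 25600 > /sys/block/zram0/writeback_limit
 */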
static void reset_bdev(struct zram *zram)
        struct block_device *bdev;

        if (!zram->backing_dev)

        if (zram->old_block_size)
                set_blocksize(bdev, zram->old_block_size);
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
        /* hope filp_close flushes all of the IO */
        filp_close(zram->backing_dev, NULL);
        zram->backing_dev = NULL;
        zram->old_block_size = 0;

        zram->disk->queue->backing_dev_info->capabilities |=
                        BDI_CAP_SYNCHRONOUS_IO;
        kvfree(zram->bitmap);
static ssize_t backing_dev_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);
        struct file *file = zram->backing_dev;

        down_read(&zram->init_lock);
        if (!zram->backing_dev) {
                memcpy(buf, "none\n", 5);
                up_read(&zram->init_lock);

        p = file_path(file, buf, PAGE_SIZE - 1);

        memmove(buf, p, ret);

        up_read(&zram->init_lock);
static ssize_t backing_dev_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct file *backing_dev = NULL;
        struct address_space *mapping;
        unsigned int bitmap_sz, old_block_size = 0;
        unsigned long nr_pages, *bitmap = NULL;
        struct block_device *bdev = NULL;
        struct zram *zram = dev_to_zram(dev);

        file_name = kmalloc(PATH_MAX, GFP_KERNEL);

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Can't setup backing device for initialized device\n");

        strlcpy(file_name, buf, PATH_MAX);
        /* ignore trailing newline */
        sz = strlen(file_name);
        if (sz > 0 && file_name[sz - 1] == '\n')
                file_name[sz - 1] = 0x00;

        backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
        if (IS_ERR(backing_dev)) {
                err = PTR_ERR(backing_dev);

        mapping = backing_dev->f_mapping;
        inode = mapping->host;

        /* Only block devices are supported at the moment */
        if (!S_ISBLK(inode->i_mode)) {

        bdev = bdgrab(I_BDEV(inode));
        err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);

        nr_pages = i_size_read(inode) >> PAGE_SHIFT;
        bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
        bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);

        old_block_size = block_size(bdev);
        err = set_blocksize(bdev, PAGE_SIZE);

        zram->old_block_size = old_block_size;
        zram->backing_dev = backing_dev;
        zram->bitmap = bitmap;
        zram->nr_pages = nr_pages;
        /*
         * With the writeback feature, zram does asynchronous IO, so it is no
         * longer a synchronous device; remove the synchronous io flag.
         * Otherwise, the upper layer (e.g., swap) could wait for IO
         * completion rather than submit and return, which makes the system
         * sluggish. Furthermore, when the IO function returns (e.g.,
         * swap_readpage), the upper layer expects the IO to be done, so it
         * may free the page; but the IO is in fact still in flight, which
         * could ultimately cause a use-after-free when the IO really
         * completes.
         */
        zram->disk->queue->backing_dev_info->capabilities &=
                        ~BDI_CAP_SYNCHRONOUS_IO;
        up_write(&zram->init_lock);

        pr_info("setup backing device %s\n", file_name);

        blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

        filp_close(backing_dev, NULL);

        up_write(&zram->init_lock);
static unsigned long alloc_block_bdev(struct zram *zram)
        unsigned long blk_idx = 1;

        /* skip bit 0, to avoid confusion with zram.handle == 0 */
        blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
        if (blk_idx == zram->nr_pages)

        if (test_and_set_bit(blk_idx, zram->bitmap))

        atomic64_inc(&zram->stats.bd_count);

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)

        was_set = test_and_clear_bit(blk_idx, zram->bitmap);
        WARN_ON_ONCE(!was_set);
        atomic64_dec(&zram->stats.bd_count);
static void zram_page_end_io(struct bio *bio)
        struct page *page = bio_first_page_all(bio);

        page_endio(page, op_is_write(bio_op(bio)),
                        blk_status_to_errno(bio->bi_status));

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent)

        bio = bio_alloc(GFP_ATOMIC, 1);

        bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
        bio_set_dev(bio, zram->bdev);
        if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {

        bio->bi_opf = REQ_OP_READ;
        bio->bi_end_io = zram_page_end_io;

        bio->bi_opf = parent->bi_opf;
        bio_chain(bio, parent);
#define HUGE_WRITEBACK 1
#define IDLE_WRITEBACK 2

static ssize_t writeback_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        struct bio_vec bio_vec;
        unsigned long blk_idx = 0;

        sz = strscpy(mode_buf, buf, sizeof(mode_buf));

        /* ignore trailing newline */
        if (mode_buf[sz - 1] == '\n')
                mode_buf[sz - 1] = 0x00;

        if (!strcmp(mode_buf, "idle"))
                mode = IDLE_WRITEBACK;
        else if (!strcmp(mode_buf, "huge"))
                mode = HUGE_WRITEBACK;
        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                goto release_init_lock;

        if (!zram->backing_dev) {
                goto release_init_lock;

        page = alloc_page(GFP_KERNEL);
                goto release_init_lock;

        for (index = 0; index < nr_pages; index++) {

                bvec.bv_len = PAGE_SIZE;

                spin_lock(&zram->wb_limit_lock);
                if (zram->wb_limit_enable && !zram->bd_wb_limit) {
                        spin_unlock(&zram->wb_limit_lock);
                spin_unlock(&zram->wb_limit_lock);

                blk_idx = alloc_block_bdev(zram);

                zram_slot_lock(zram, index);
                if (!zram_allocated(zram, index))

                if (zram_test_flag(zram, index, ZRAM_WB) ||
                                zram_test_flag(zram, index, ZRAM_SAME) ||
                                zram_test_flag(zram, index, ZRAM_UNDER_WB))

                if (mode == IDLE_WRITEBACK &&
                                !zram_test_flag(zram, index, ZRAM_IDLE))
                if (mode == HUGE_WRITEBACK &&
                                !zram_test_flag(zram, index, ZRAM_HUGE))
                /*
                 * Clearing ZRAM_UNDER_WB is the caller's duty.
                 * IOW, zram_free_page never clears it.
                 */
                zram_set_flag(zram, index, ZRAM_UNDER_WB);
                /* Needed to handle hugepage writeback races */
                zram_set_flag(zram, index, ZRAM_IDLE);
                zram_slot_unlock(zram, index);
                if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
                        zram_slot_lock(zram, index);
                        zram_clear_flag(zram, index, ZRAM_UNDER_WB);
                        zram_clear_flag(zram, index, ZRAM_IDLE);
                        zram_slot_unlock(zram, index);

                bio_init(&bio, &bio_vec, 1);
                bio_set_dev(&bio, zram->bdev);
                bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
                bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;

                bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
                                bvec.bv_offset);
                /*
                 * XXX: A single page IO would be inefficient for write,
                 * but it is not bad as a starter.
                 */
                ret = submit_bio_wait(&bio);

                        zram_slot_lock(zram, index);
                        zram_clear_flag(zram, index, ZRAM_UNDER_WB);
                        zram_clear_flag(zram, index, ZRAM_IDLE);
                        zram_slot_unlock(zram, index);

                atomic64_inc(&zram->stats.bd_writes);
                /*
                 * We released the zram_slot_lock, so we need to check whether
                 * the slot was changed. If the slot was freed, we can catch
                 * that easily via zram_allocated.
                 * A subtle case is when the slot is freed, reallocated and
                 * marked ZRAM_IDLE again. To close that race, idle_store
                 * doesn't mark a slot ZRAM_IDLE once it finds the slot is
                 * ZRAM_UNDER_WB. Thus, we can close the race by checking the
                 * ZRAM_IDLE bit.
                 */
                zram_slot_lock(zram, index);
                if (!zram_allocated(zram, index) ||
                                !zram_test_flag(zram, index, ZRAM_IDLE)) {
                        zram_clear_flag(zram, index, ZRAM_UNDER_WB);
                        zram_clear_flag(zram, index, ZRAM_IDLE);

                zram_free_page(zram, index);
                zram_clear_flag(zram, index, ZRAM_UNDER_WB);
                zram_set_flag(zram, index, ZRAM_WB);
                zram_set_element(zram, index, blk_idx);

                atomic64_inc(&zram->stats.pages_stored);
                spin_lock(&zram->wb_limit_lock);
                if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
                        zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
                spin_unlock(&zram->wb_limit_lock);

                zram_slot_unlock(zram, index);

        free_block_bdev(zram, blk_idx);

release_init_lock:
        up_read(&zram->init_lock);
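/*
 * Example usage (illustrative): write back idle pages, assuming a
 * backing device was configured before the disksize was set:
 *
 *      echo /dev/sdb1 > /sys/block/zram0/backing_dev
 *      echo <disksize> > /sys/block/zram0/disksize
 *      echo all > /sys/block/zram0/idle
 *      echo idle > /sys/block/zram0/writeback
 */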
struct zram_work {
        struct work_struct work;

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
        struct zram_work *zw = container_of(work, struct zram_work, work);
        struct zram *zram = zw->zram;
        unsigned long entry = zw->entry;
        struct bio *bio = zw->bio;

        read_from_bdev_async(zram, &bvec, entry, bio);

/*
 * The block layer wants one ->make_request_fn to be active at a time,
 * so if we chain the IO with the parent IO in the same context, it
 * deadlocks. To avoid that, the read is done from a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *bio)
        struct zram_work work;

        INIT_WORK_ONSTACK(&work.work, zram_sync_read);
        queue_work(system_unbound_wq, &work.work);
        flush_work(&work.work);
        destroy_work_on_stack(&work.work);
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *bio)
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent, bool sync)
        atomic64_inc(&zram->stats.bd_reads);
        if (sync)
                return read_from_bdev_sync(zram, bvec, entry, parent);
        return read_from_bdev_async(zram, bvec, entry, parent);
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent, bool sync)

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
        zram_debugfs_root = debugfs_create_dir("zram", NULL);

static void zram_debugfs_destroy(void)
        debugfs_remove_recursive(zram_debugfs_root);

static void zram_accessed(struct zram *zram, u32 index)
        zram_clear_flag(zram, index, ZRAM_IDLE);
        zram->table[index].ac_time = ktime_get_boottime();

static ssize_t read_block_state(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
        ssize_t index, written = 0;
        struct zram *zram = file->private_data;
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        struct timespec64 ts;

        kbuf = kvmalloc(count, GFP_KERNEL);

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);

        for (index = *ppos; index < nr_pages; index++) {

                zram_slot_lock(zram, index);
                if (!zram_allocated(zram, index))

                ts = ktime_to_timespec64(zram->table[index].ac_time);
                copied = snprintf(kbuf + written, count,
                        "%12zd %12lld.%06lu %c%c%c%c\n",
                        index, (s64)ts.tv_sec,
                        ts.tv_nsec / NSEC_PER_USEC,
                        zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
                        zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
                        zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
                        zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

                if (count < copied) {
                        zram_slot_unlock(zram, index);

                zram_slot_unlock(zram, index);

        up_read(&zram->init_lock);
        if (copy_to_user(buf, kbuf, written))

static const struct file_operations proc_zram_block_state_op = {
        .read = read_block_state,
        .llseek = default_llseek,
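/*
 * Example block_state output (illustrative values; the columns come
 * from the snprintf() format above -- index, access time in seconds,
 * then the s/w/h/i flag letters):
 *
 *      300    75.033841 .wh.
 *      301    63.806904 s...
 */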
static void zram_debugfs_register(struct zram *zram)
        if (!zram_debugfs_root)

        zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
                        zram_debugfs_root);
        debugfs_create_file("block_state", 0400, zram->debugfs_dir,
                        zram, &proc_zram_block_state_op);

static void zram_debugfs_unregister(struct zram *zram)
        debugfs_remove_recursive(zram->debugfs_dir);
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
        zram_clear_flag(zram, index, ZRAM_IDLE);
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);
        char compressor[ARRAY_SIZE(zram->compressor)];

        strlcpy(compressor, buf, sizeof(compressor));
        /* ignore trailing newline */
        sz = strlen(compressor);
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;

        if (!zcomp_available_algorithm(compressor))

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");

        strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
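/*
 * Example usage (illustrative): the compressor can only be changed
 * before the device is initialized, and only to a backend that is
 * available in the running kernel:
 *
 *      cat /sys/block/zram0/comp_algorithm
 *      echo lz4 > /sys/block/zram0/comp_algorithm
 */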
static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);

        zs_compact(zram->mem_pool);
        up_read(&zram->init_lock);

static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->mem_pool);
                zs_pool_stats(zram->mem_pool, &pool_stats);

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.same_pages),
                        pool_stats.pages_compacted,
                        (u64)atomic64_read(&zram->stats.huge_pages));
        up_read(&zram->init_lock);

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu\n",
                        FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
                        FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
                        FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
        up_read(&zram->init_lock);
#endif

static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "version: %d\n%8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.writestall),
                        (u64)atomic64_read(&zram->stats.miss_free));
        up_read(&zram->init_lock);

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
        size_t num_pages = disksize >> PAGE_SHIFT;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++)
                zram_free_page(zram, index);

        zs_destroy_pool(zram->mem_pool);

static bool zram_meta_alloc(struct zram *zram, u64 disksize)

        num_pages = disksize >> PAGE_SHIFT;
        zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));

        zram->mem_pool = zs_create_pool(zram->disk->disk_name);
        if (!zram->mem_pool) {

        if (!huge_class_size)
                huge_class_size = zs_huge_class_size(zram->mem_pool);

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock, to
 * indicate that this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
        unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
        zram->table[index].ac_time = 0;
#endif
        if (zram_test_flag(zram, index, ZRAM_IDLE))
                zram_clear_flag(zram, index, ZRAM_IDLE);

        if (zram_test_flag(zram, index, ZRAM_HUGE)) {
                zram_clear_flag(zram, index, ZRAM_HUGE);
                atomic64_dec(&zram->stats.huge_pages);

        if (zram_test_flag(zram, index, ZRAM_WB)) {
                zram_clear_flag(zram, index, ZRAM_WB);
                free_block_bdev(zram, zram_get_element(zram, index));

        /*
         * No memory is allocated for same element filled pages.
         * Simply clear the same page flag.
         */
        if (zram_test_flag(zram, index, ZRAM_SAME)) {
                zram_clear_flag(zram, index, ZRAM_SAME);
                atomic64_dec(&zram->stats.same_pages);

        handle = zram_get_handle(zram, index);

        zs_free(zram->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(zram, index),
                        &zram->stats.compr_data_size);

        atomic64_dec(&zram->stats.pages_stored);
        zram_set_handle(zram, index, 0);
        zram_set_obj_size(zram, index, 0);
        WARN_ON_ONCE(zram->table[index].flags &
                        ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
                                struct bio *bio, bool partial_io)
        unsigned long handle;

        zram_slot_lock(zram, index);
        if (zram_test_flag(zram, index, ZRAM_WB)) {
                struct bio_vec bvec;

                zram_slot_unlock(zram, index);

                bvec.bv_page = page;
                bvec.bv_len = PAGE_SIZE;

                return read_from_bdev(zram, &bvec,
                                zram_get_element(zram, index),
                                bio, partial_io);

        handle = zram_get_handle(zram, index);
        if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
                unsigned long value;

                value = handle ? zram_get_element(zram, index) : 0;
                mem = kmap_atomic(page);
                zram_fill_page(mem, PAGE_SIZE, value);

                zram_slot_unlock(zram, index);

        size = zram_get_obj_size(zram, index);

        src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
                dst = kmap_atomic(page);
                memcpy(dst, src, PAGE_SIZE);

                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

                dst = kmap_atomic(page);
                ret = zcomp_decompress(zstrm, src, size, dst);

                zcomp_stream_put(zram->comp);

        zs_unmap_object(zram->mem_pool, handle);
        zram_slot_unlock(zram, index);

        /* Should NEVER happen. Return bio error if it does. */
        pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset, struct bio *bio)
        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /* Use a temporary buffer to decompress the page */
                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);

        ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));

        if (is_partial_io(bvec)) {
                void *dst = kmap_atomic(bvec->bv_page);
                void *src = kmap_atomic(page);

                memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);

        if (is_partial_io(bvec))
static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
                                u32 index, struct bio *bio)
        unsigned long alloced_pages;
        unsigned long handle = 0;
        unsigned int comp_len = 0;
        void *src, *dst, *mem;
        struct zcomp_strm *zstrm;
        struct page *page = bvec->bv_page;
        unsigned long element = 0;
        enum zram_pageflags flags = 0;

        mem = kmap_atomic(page);
        if (page_same_filled(mem, &element)) {
                /* Free memory associated with this sector now. */
                atomic64_inc(&zram->stats.same_pages);

compress_again:
        zstrm = zcomp_stream_get(zram->comp);
        src = kmap_atomic(page);
        ret = zcomp_compress(zstrm, src, &comp_len);

        if (unlikely(ret)) {
                zcomp_stream_put(zram->comp);
                pr_err("Compression failed! err=%d\n", ret);
                zs_free(zram->mem_pool, handle);

        if (comp_len >= huge_class_size)
                comp_len = PAGE_SIZE;
        /*
         * handle allocation has 2 paths:
         * a) fast path is executed with preemption disabled (for
         *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
         *    since we can't sleep;
         * b) slow path enables preemption and attempts to allocate
         *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
         *    put per-cpu compression stream and, thus, to re-do
         *    the compression once handle is allocated.
         *
         * if we have a 'non-null' handle here then we are coming
         * from the slow path and handle has already been allocated.
         */
                handle = zs_malloc(zram->mem_pool, comp_len,
                                __GFP_KSWAPD_RECLAIM |

                zcomp_stream_put(zram->comp);
                atomic64_inc(&zram->stats.writestall);
                handle = zs_malloc(zram->mem_pool, comp_len,
                                GFP_NOIO | __GFP_HIGHMEM |
                        goto compress_again;

        alloced_pages = zs_get_total_pages(zram->mem_pool);
        update_used_max(zram, alloced_pages);

        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zcomp_stream_put(zram->comp);
                zs_free(zram->mem_pool, handle);

        dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

        src = zstrm->buffer;
        if (comp_len == PAGE_SIZE)
                src = kmap_atomic(page);
        memcpy(dst, src, comp_len);
        if (comp_len == PAGE_SIZE)

        zcomp_stream_put(zram->comp);
        zs_unmap_object(zram->mem_pool, handle);
        atomic64_add(comp_len, &zram->stats.compr_data_size);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        zram_slot_lock(zram, index);
        zram_free_page(zram, index);

        if (comp_len == PAGE_SIZE) {
                zram_set_flag(zram, index, ZRAM_HUGE);
                atomic64_inc(&zram->stats.huge_pages);

        zram_set_flag(zram, index, flags);
        zram_set_element(zram, index, element);

        zram_set_handle(zram, index, handle);
        zram_set_obj_size(zram, index, comp_len);

        zram_slot_unlock(zram, index);

        atomic64_inc(&zram->stats.pages_stored);
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset, struct bio *bio)
        struct page *page = NULL;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);

                ret = __zram_bvec_read(zram, page, index, bio, true);

                src = kmap_atomic(bvec->bv_page);
                dst = kmap_atomic(page);
                memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);

                vec.bv_len = PAGE_SIZE;

        ret = __zram_bvec_write(zram, &vec, index, bio);

        if (is_partial_io(bvec))

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                int offset, struct bio *bio)
        size_t n = bio->bi_iter.bi_size;

        /*
         * zram manages data in physical block size units. Because the
         * logical block size isn't identical to the physical block size on
         * some arches, we could get a discard request pointing to a specific
         * offset within a certain physical block. Although we can handle
         * this request by reading that physical block and decompressing and
         * partially zeroing and re-compressing and then re-storing it, this
         * isn't reasonable because our intent with a discard request is to
         * save memory. So skipping this logical block is appropriate here.
         */
        if (n <= (PAGE_SIZE - offset))

        n -= (PAGE_SIZE - offset);

        while (n >= PAGE_SIZE) {
                zram_slot_lock(zram, index);
                zram_free_page(zram, index);
                zram_slot_unlock(zram, index);
                atomic64_inc(&zram->stats.notify_free);
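/*
 * Worked example (assuming PAGE_SIZE == 4096): a discard of
 * n == 10240 bytes at offset == 512 skips the 3584-byte remainder of
 * the first page (only whole pages can be freed), frees one full
 * page, and skips the trailing 2560 bytes as well.
 */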
/*
 * Returns errno if it has some problem. Otherwise return 0 or 1.
 * Returns 0 if IO request was done synchronously
 * Returns 1 if IO request was successfully submitted.
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, unsigned int op, struct bio *bio)
        unsigned long start_time = jiffies;
        struct request_queue *q = zram->disk->queue;

        generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (!op_is_write(op)) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                flush_dcache_page(bvec->bv_page);

                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset, bio);

        generic_end_io_acct(q, op, &zram->disk->part0, start_time);

        zram_slot_lock(zram, index);
        zram_accessed(zram, index);
        zram_slot_unlock(zram, index);

        if (unlikely(ret < 0)) {
                if (!op_is_write(op))
                        atomic64_inc(&zram->stats.failed_reads);

                        atomic64_inc(&zram->stats.failed_writes);
static void __zram_make_request(struct zram *zram, struct bio *bio)
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                        (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                zram_bio_discard(zram, index, offset, bio);

        bio_for_each_segment(bvec, bio, iter) {
                struct bio_vec bv = bvec;
                unsigned int unwritten = bvec.bv_len;

                        bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
                                        unwritten);
                        if (zram_bvec_rw(zram, &bv, index, offset,
                                        bio_op(bio), bio) < 0)

                        bv.bv_offset += bv.bv_len;
                        unwritten -= bv.bv_len;

                        update_position(&index, &offset, &bv);
                } while (unwritten);

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
        struct zram *zram = queue->queuedata;

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);

        __zram_make_request(zram, bio);
        return BLK_QC_T_NONE;

        return BLK_QC_T_NONE;
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)

        zram = bdev->bd_disk->private_data;

        atomic64_inc(&zram->stats.notify_free);
        if (!zram_slot_trylock(zram, index)) {
                atomic64_inc(&zram->stats.miss_free);

        zram_free_page(zram, index);
        zram_slot_unlock(zram, index);

static int zram_rw_page(struct block_device *bdev, sector_t sector,
                        struct page *page, unsigned int op)

        if (PageTransHuge(page))
        zram = bdev->bd_disk->private_data;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_len = PAGE_SIZE;

        ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
        /*
         * If I/O fails, just return an error (i.e., non-zero) without
         * calling page_endio. That causes the upper functions of rw_page
         * (e.g., swap_readpage, __swap_writepage) to resubmit the I/O as a
         * bio request, and bio->bi_end_io then handles the error
         * (e.g., SetPageError, set_page_dirty and extra work).
         */
        if (unlikely(ret < 0))

        page_endio(page, op_is_write(op), 0);
static void zram_reset_device(struct zram *zram)

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);

        disksize = zram->disksize;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* I/O operations on all CPUs are done, so let's free */
        zram_meta_free(zram, disksize);
        memset(&zram->stats, 0, sizeof(zram->stats));
        zcomp_destroy(comp);

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        struct zram *zram = dev_to_zram(dev);

        disksize = memparse(buf, NULL);

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");

        disksize = PAGE_ALIGN(disksize);
        if (!zram_meta_alloc(zram, disksize)) {

        comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);

        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        revalidate_disk(zram->disk);
        up_write(&zram->init_lock);

        zram_meta_free(zram, disksize);

        up_write(&zram->init_lock);
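/*
 * Example usage (illustrative): disksize is parsed with memparse()
 * and page-aligned, and must be set before the device is used:
 *
 *      echo 1G > /sys/block/zram0/disksize
 *      mkswap /dev/zram0 && swapon /dev/zram0
 */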
static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
        unsigned short do_reset;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);

        /* From now on, no one can open /dev/zram[0-9] */
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all the pending I/O are finished */
        zram_reset_device(zram);
        revalidate_disk(zram->disk);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

static int zram_open(struct block_device *bdev, fmode_t mode)

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed for a reset, so the open request fails */

static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_compact.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_idle.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
        &dev_attr_backing_dev.attr,
        &dev_attr_writeback.attr,
        &dev_attr_writeback_limit.attr,
        &dev_attr_writeback_limit_enable.attr,
#endif
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
        &dev_attr_bd_stat.attr,
#endif
        &dev_attr_debug_stat.attr,
        NULL,
};

static const struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

static const struct attribute_group *zram_disk_attr_groups[] = {
        &zram_disk_attr_group,
        NULL,
};
/*
 * Allocate and initialize a new zram device. The function returns a
 * '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
        struct request_queue *queue;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);

        init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
        spin_lock_init(&zram->wb_limit_lock);
#endif
        queue = blk_alloc_queue(GFP_KERNEL);
                pr_err("Error allocating disk queue for device %d\n",
                                device_id);

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
                pr_err("Error allocating disk structure for device %d\n",
                                device_id);
                goto out_free_queue;

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

        /*
         * To ensure that we always get PAGE_SIZE-aligned
         * and n*PAGE_SIZE-sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);

        /*
         * zram_bio_discard() will clear all logical blocks if the logical
         * block size is identical to the physical block size (PAGE_SIZE).
         * But if it is different, we will skip discarding some parts of
         * logical blocks in the part of the request range which isn't
         * aligned to the physical block size. So we can't ensure that all
         * discarded logical blocks are zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

        zram->disk->queue->backing_dev_info->capabilities |=
                        (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
        device_add_disk(NULL, zram->disk, zram_disk_attr_groups);

        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

        zram_debugfs_register(zram);
        pr_info("Added device: %s\n", zram->disk->disk_name);

out_free_queue:
        blk_cleanup_queue(queue);

        idr_remove(&zram_index_idr, device_id);
static int zram_remove(struct zram *zram)
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);

        mutex_unlock(&bdev->bd_mutex);

        zram_debugfs_unregister(zram);

        /* Make sure all the pending I/O are finished */
        zram_reset_device(zram);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        del_gendisk(zram->disk);
        blk_cleanup_queue(zram->disk->queue);
        put_disk(zram->disk);

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute,
 * in the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns back
 * this device's device_id (or an error code if it fails to create a
 * new device).
 */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);

static CLASS_ATTR_RO(hot_add);

static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
                ret = zram_remove(zram);
                        idr_remove(&zram_index_idr, dev_id);

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;

static CLASS_ATTR_WO(hot_remove);
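/*
 * Example usage (illustrative): reading hot_add creates a device and
 * returns its id; writing an id to hot_remove deletes that device:
 *
 *      cat /sys/class/zram-control/hot_add      # -> e.g. 1
 *      echo 1 > /sys/class/zram-control/hot_remove
 */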
static struct attribute *zram_control_class_attrs[] = {
        &class_attr_hot_add.attr,
        &class_attr_hot_remove.attr,
        NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_groups   = zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)

static void destroy_devices(void)
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        zram_debugfs_destroy();
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
        cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);

static int __init zram_init(void)

        ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
                                zcomp_cpu_up_prepare, zcomp_cpu_dead);

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_err("Unable to register zram-control class\n");
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);

        zram_debugfs_create();
        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_err("Unable to get major number\n");
                class_unregister(&zram_control_class);
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);

static void __exit zram_exit(void)
        destroy_devices();

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
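/*
 * Example usage (illustrative): pre-create four devices at module
 * load time instead of the default one:
 *
 *      modprobe zram num_devices=4
 */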
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");