/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta = zram->meta;

        down_read(&zram->init_lock);
        if (zram->init_done)
                val = zs_get_total_size_bytes(meta->mem_pool);
        up_read(&zram->init_lock);

        return sprintf(buf, "%llu\n", val);
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_iter.bi_sector &
                     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_iter.bi_sector;
        end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

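/*
 * Worked example (assuming ZRAM_LOGICAL_BLOCK_SIZE == 4096, so
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a bio at sector 8 with
 * bi_size 8192 passes both alignment checks, while a bio at
 * sector 9, or one with bi_size 512, is rejected as unaligned.
 */
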
static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        kfree(meta->compress_workmem);
        free_pages((unsigned long)meta->compress_buffer, 1);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!meta->compress_workmem)
                goto free_meta;

        meta->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!meta->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                goto free_workmem;
        }

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_buffer;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        rwlock_init(&meta->tb_lock);
        return meta;

free_table:
        vfree(meta->table);
free_buffer:
        free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
        kfree(meta->compress_workmem);
free_meta:
        kfree(meta);
out:
        return NULL;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

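/*
 * Example (assuming PAGE_SIZE == 4096): with *offset == 3072 and a
 * 2048-byte bvec, the access spills into the next page, so *index
 * advances by one and *offset wraps to (3072 + 2048) % 4096 == 1024.
 */
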
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;
        u16 size = meta->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                atomic_dec(&zram->stats.bad_compress);

        zs_free(meta->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                atomic_dec(&zram->stats.good_compress);

        atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
        atomic_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        meta->table[index].size = 0;
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = LZO_E_OK;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        u16 size;

        read_lock(&meta->tb_lock);
        handle = meta->table[index].handle;
        size = meta->table[index].size;

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                read_unlock(&meta->tb_lock);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
        zs_unmap_object(meta->mem_pool, handle);
        read_unlock(&meta->tb_lock);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                atomic64_inc(&zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        read_lock(&meta->tb_lock);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                read_unlock(&meta->tb_lock);
                handle_zero_page(bvec);
                return 0;
        }
        read_unlock(&meta->tb_lock);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;
        src = meta->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else
                uncmem = user_mem;

        if (page_zero_filled(uncmem)) {
                /* user_mem was already unmapped for partial writes */
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                write_lock(&zram->meta->tb_lock);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                write_unlock(&zram->meta->tb_lock);

                atomic_inc(&zram->stats.pages_zero);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                        meta->compress_workmem);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size)) {
                atomic_inc(&zram->stats.bad_compress);
                clen = PAGE_SIZE;
                src = NULL;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else
                memcpy(cmem, src, clen);

        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        write_lock(&zram->meta->tb_lock);
        zram_free_page(zram, index);
        meta->table[index].handle = handle;
        meta->table[index].size = clen;
        write_unlock(&zram->meta->tb_lock);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_size);
        atomic_inc(&zram->stats.pages_stored);
        if (clen <= PAGE_SIZE / 2)
                atomic_inc(&zram->stats.good_compress);
out:
        if (is_partial_io(bvec))
                kfree(uncmem);
        if (ret)
                atomic64_inc(&zram->stats.failed_writes);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);
        if (!zram->init_done) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        zram->init_done = 0;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;
                zs_free(meta->mem_pool, handle);
        }

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);
        up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
        if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %lu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
                );
        }

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->meta = meta;
        zram->init_done = 1;

        pr_debug("Initialization done!\n");
}

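/*
 * Rough numbers behind the warning above: metadata costs one table
 * entry per PAGE_SIZE of disksize, so with 4 KiB pages and an entry
 * of roughly 8-16 bytes (layout varies by architecture) the idle
 * overhead is on the order of 0.1%-0.4% of the device size.
 */
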
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;
        down_write(&zram->init_lock);
        if (zram->init_done) {
                up_write(&zram->init_lock);
                zram_meta_free(meta);
                pr_info("Cannot change disksize for initialized device\n");
                return -EBUSY;
        }

        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        zram_init_device(zram, meta);
        up_write(&zram->init_lock);

        return len;
}

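/*
 * Typical usage from userspace (sketch):
 *	echo 512M > /sys/block/zram0/disksize
 * memparse() accepts the usual K/M/G suffixes and the value is then
 * rounded up to a PAGE_SIZE multiple by PAGE_ALIGN().
 */
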
static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;
        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram, true);
        return len;

out:
        bdput(bdev);
        return ret;
}

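/*
 * Userspace side (sketch; the device must have no holders first):
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */
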
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        switch (rw) {
        case READ:
                atomic64_inc(&zram->stats.num_reads);
                break;
        case WRITE:
                atomic64_inc(&zram->stats.num_writes);
                break;
        }

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

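/*
 * Split example (assuming PAGE_SIZE == 4096): a 4096-byte bvec whose
 * target starts at offset 3072 inside a zram page becomes a 1024-byte
 * chunk for page `index` plus a 3072-byte chunk for page `index + 1`,
 * each handed to zram_bvec_rw() separately.
 */
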
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);
        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        write_lock(&meta->tb_lock);
        zram_free_page(zram, index);
        write_unlock(&meta->tb_lock);
        atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

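/*
 * These attributes appear under /sys/block/zram<id>/, so the stats
 * above can be read with plain file reads, e.g. (sketch):
 *	cat /sys/block/zram0/mem_used_total
 *	cat /sys/block/zram0/compr_data_size
 */
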
static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }

        zram->init_done = 0;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);
        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

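/*
 * Example (hypothetical value): load the module with a custom device
 * count, creating /dev/zram0 through /dev/zram3:
 *	modprobe zram num_devices=4
 */
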
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");