block: pass in queue to inflight accounting
linux-2.6-block.git: drivers/block/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static void zram_free_page(struct zram *zram, size_t index);

static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
        return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
        zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
                        unsigned long element)
{
        zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
        return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
        return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
                                        u32 index, size_t size)
{
        unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;

        zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
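
/*
 * Layout sketch of table[index].value (assuming ZRAM_FLAG_SHIFT, defined in
 * zram_drv.h, leaves enough low bits to hold sizes up to PAGE_SIZE):
 *
 *     bit:  63 .. ZRAM_FLAG_SHIFT | ZRAM_FLAG_SHIFT-1 .. 0
 *           zram_pageflags        | compressed object size
 *
 * zram_set_obj_size() therefore preserves the flag bits while rewriting
 * only the size field.
 */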

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return false;
}
#endif

static void zram_revalidate_disk(struct zram *zram)
{
        revalidate_disk(zram->disk);
        /* revalidate_disk() clears BDI_CAP_STABLE_WRITES, so set it again */
        zram->disk->queue->backing_dev_info->capabilities |=
                BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return false;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return false;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return false;

        /* I/O request is valid */
        return true;
}
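
/*
 * Worked example (hypothetical numbers, for illustration only): with a 1GiB
 * disksize, bound is 1GiB >> SECTOR_SHIFT = 2097152 sectors. An 8192-byte
 * request at sector 2097144 yields end = 2097160 > bound and is rejected,
 * while a 4096-byte request at the same sector ends exactly at bound and is
 * accepted.
 */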

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        *index  += (*offset + bvec->bv_len) / PAGE_SIZE;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}
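
/*
 * update_used_max() is a lock-free "monotonic max" update: it only attempts
 * the cmpxchg when the new value is larger. If another CPU raced in and
 * changed max_used_pages, cmpxchg returns the fresh value and the loop
 * retries against it; if the stored maximum is already >= pages, the loop
 * falls through without writing anything.
 */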

static inline void zram_fill_page(char *ptr, unsigned long len,
                                        unsigned long value)
{
        int i;
        unsigned long *page = (unsigned long *)ptr;

        WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

        if (likely(value == 0)) {
                memset(ptr, 0, len);
        } else {
                for (i = 0; i < len / sizeof(*page); i++)
                        page[i] = value;
        }
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
        unsigned int pos;
        unsigned long *page;
        unsigned long val;

        page = (unsigned long *)ptr;
        val = page[0];

        for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
                if (val != page[pos])
                        return false;
        }

        *element = val;

        return true;
}
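
/*
 * Same-filled detection example: a page that is entirely zeroes, or one
 * where every machine word repeats the same pattern (say 0xdeadbeefdeadbeef
 * on a 64-bit machine), is not compressed at all. Only the repeating word
 * is kept in table[index].element, and zram_fill_page() reconstructs the
 * page from it on read.
 */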

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}
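
/*
 * Usage sketch from the shell (memparse() accepts K/M/G suffixes):
 *
 *     echo 512M > /sys/block/zram0/mem_limit   # cap the pool at 512MiB
 *     echo 0    > /sys/block/zram0/mem_limit   # 0 disables the limit
 */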

static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(zram->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2-year
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        char compressor[CRYPTO_MAX_ALG_NAME];
        size_t sz;

        strlcpy(compressor, buf, sizeof(compressor));
        /* ignore trailing newline */
        sz = strlen(compressor);
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;

        if (!zcomp_available_algorithm(compressor))
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }

        strlcpy(zram->compressor, compressor, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}
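
/*
 * Usage sketch from the shell: the algorithm can only be changed before
 * the device is initialized with a disksize.
 *
 *     cat /sys/block/zram0/comp_algorithm    # e.g. "[lzo] lz4 deflate"
 *     echo lz4 > /sys/block/zram0/comp_algorithm
 *
 * (The exact list depends on which crypto compressors the kernel provides.)
 */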

static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        zs_compact(zram->mem_pool);
        up_read(&zram->init_lock);

        return len;
}

static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}

static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->mem_pool);
                zs_pool_stats(zram->mem_pool, &pool_stats);
        }

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.same_pages),
                        pool_stats.pages_compacted);
        up_read(&zram->init_lock);

        return ret;
}

static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int version = 1;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "version: %d\n%8llu\n",
                        version,
                        (u64)atomic64_read(&zram->stats.writestall));
        up_read(&zram->init_lock);

        return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

static void zram_slot_lock(struct zram *zram, u32 index)
{
        bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
        bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}

static bool zram_same_page_read(struct zram *zram, u32 index,
                                struct page *page,
                                unsigned int offset, unsigned int len)
{
        zram_slot_lock(zram, index);
        if (unlikely(!zram_get_handle(zram, index) ||
                        zram_test_flag(zram, index, ZRAM_SAME))) {
                void *mem;

                zram_slot_unlock(zram, index);
                mem = kmap_atomic(page);
                zram_fill_page(mem + offset, len,
                                        zram_get_element(zram, index));
                kunmap_atomic(mem);
                return true;
        }
        zram_slot_unlock(zram, index);

        return false;
}

static bool zram_same_page_write(struct zram *zram, u32 index,
                                        struct page *page)
{
        unsigned long element;
        void *mem = kmap_atomic(page);

        if (page_same_filled(mem, &element)) {
                kunmap_atomic(mem);
                /* Free memory associated with this sector now. */
                zram_slot_lock(zram, index);
                zram_free_page(zram, index);
                zram_set_flag(zram, index, ZRAM_SAME);
                zram_set_element(zram, index, element);
                zram_slot_unlock(zram, index);

                atomic64_inc(&zram->stats.same_pages);
                atomic64_inc(&zram->stats.pages_stored);
                return true;
        }
        kunmap_atomic(mem);

        return false;
}

static void zram_meta_free(struct zram *zram, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++)
                zram_free_page(zram, index);

        zs_destroy_pool(zram->mem_pool);
        vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
        size_t num_pages;

        num_pages = disksize >> PAGE_SHIFT;
        zram->table = vzalloc(num_pages * sizeof(*zram->table));
        if (!zram->table)
                return false;

        zram->mem_pool = zs_create_pool(zram->disk->disk_name);
        if (!zram->mem_pool) {
                vfree(zram->table);
                return false;
        }

        return true;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold the table entry's bit_spinlock, which indicates that
 * the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        unsigned long handle = zram_get_handle(zram, index);

        /*
         * No memory is allocated for same-element-filled pages.
         * Simply clear the same-page flag.
         */
        if (zram_test_flag(zram, index, ZRAM_SAME)) {
                zram_clear_flag(zram, index, ZRAM_SAME);
                zram_set_element(zram, index, 0);
                atomic64_dec(&zram->stats.same_pages);
                atomic64_dec(&zram->stats.pages_stored);
                return;
        }

        if (!handle)
                return;

        zs_free(zram->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(zram, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        zram_set_handle(zram, index, 0);
        zram_set_obj_size(zram, index, 0);
}

static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
{
        int ret;
        unsigned long handle;
        unsigned int size;
        void *src, *dst;

        if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
                return 0;

        zram_slot_lock(zram, index);
        handle = zram_get_handle(zram, index);
        size = zram_get_obj_size(zram, index);

        src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
                dst = kmap_atomic(page);
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(dst);
                ret = 0;
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

                dst = kmap_atomic(page);
                ret = zcomp_decompress(zstrm, src, size, dst);
                kunmap_atomic(dst);
                zcomp_stream_put(zram->comp);
        }
        zs_unmap_object(zram->mem_pool, handle);
        zram_slot_unlock(zram, index);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

        return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset)
{
        int ret;
        struct page *page;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /* Use a temporary buffer to decompress the page */
                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
        }

        ret = zram_decompress_page(zram, page, index);
        if (unlikely(ret))
                goto out;

        if (is_partial_io(bvec)) {
                void *dst = kmap_atomic(bvec->bv_page);
                void *src = kmap_atomic(page);

                memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
                kunmap_atomic(src);
                kunmap_atomic(dst);
        }
out:
        if (is_partial_io(bvec))
                __free_page(page);

        return ret;
}

static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
                        struct page *page,
                        unsigned long *out_handle, unsigned int *out_comp_len)
{
        int ret;
        unsigned int comp_len;
        void *src;
        unsigned long alloced_pages;
        unsigned long handle = 0;

compress_again:
        src = kmap_atomic(page);
        ret = zcomp_compress(*zstrm, src, &comp_len);
        kunmap_atomic(src);

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                if (handle)
                        zs_free(zram->mem_pool, handle);
                return ret;
        }

        if (unlikely(comp_len > max_zpage_size))
                comp_len = PAGE_SIZE;

        /*
         * handle allocation has 2 paths:
         * a) the fast path is executed with preemption disabled (for
         *  per-cpu streams) and has the __GFP_DIRECT_RECLAIM bit clear,
         *  since we can't sleep;
         * b) the slow path enables preemption and attempts to allocate
         *  the page with the __GFP_DIRECT_RECLAIM bit set. We have to
         *  release the per-cpu compression stream and, thus, re-do
         *  the compression once the handle is allocated.
         *
         * If we have a non-NULL handle here then we are coming
         * from the slow path and the handle has already been allocated.
         */
        if (!handle)
                handle = zs_malloc(zram->mem_pool, comp_len,
                                __GFP_KSWAPD_RECLAIM |
                                __GFP_NOWARN |
                                __GFP_HIGHMEM |
                                __GFP_MOVABLE);
        if (!handle) {
                zcomp_stream_put(zram->comp);
                atomic64_inc(&zram->stats.writestall);
                handle = zs_malloc(zram->mem_pool, comp_len,
                                GFP_NOIO | __GFP_HIGHMEM |
                                __GFP_MOVABLE);
                *zstrm = zcomp_stream_get(zram->comp);
                if (handle)
                        goto compress_again;
                return -ENOMEM;
        }

        alloced_pages = zs_get_total_pages(zram->mem_pool);
        update_used_max(zram, alloced_pages);

        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(zram->mem_pool, handle);
                return -ENOMEM;
        }

        *out_handle = handle;
        *out_comp_len = comp_len;
        return 0;
}

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
{
        int ret;
        unsigned long handle;
        unsigned int comp_len;
        void *src, *dst;
        struct zcomp_strm *zstrm;
        struct page *page = bvec->bv_page;

        if (zram_same_page_write(zram, index, page))
                return 0;

        zstrm = zcomp_stream_get(zram->comp);
        ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
        if (ret) {
                zcomp_stream_put(zram->comp);
                return ret;
        }

        dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

        src = zstrm->buffer;
        if (comp_len == PAGE_SIZE)
                src = kmap_atomic(page);
        memcpy(dst, src, comp_len);
        if (comp_len == PAGE_SIZE)
                kunmap_atomic(src);

        zcomp_stream_put(zram->comp);
        zs_unmap_object(zram->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        zram_slot_lock(zram, index);
        zram_free_page(zram, index);
        zram_set_handle(zram, index, handle);
        zram_set_obj_size(zram, index, comp_len);
        zram_slot_unlock(zram, index);

        /* Update stats */
        atomic64_add(comp_len, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
        return 0;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
                                u32 index, int offset)
{
        int ret;
        struct page *page = NULL;
        void *src;
        struct bio_vec vec;

        vec = *bvec;
        if (is_partial_io(bvec)) {
                void *dst;
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;

                ret = zram_decompress_page(zram, page, index);
                if (ret)
                        goto out;

                src = kmap_atomic(bvec->bv_page);
                dst = kmap_atomic(page);
                memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
                kunmap_atomic(dst);
                kunmap_atomic(src);

                vec.bv_page = page;
                vec.bv_len = PAGE_SIZE;
                vec.bv_offset = 0;
        }

        ret = __zram_bvec_write(zram, &vec, index);
out:
        if (is_partial_io(bvec))
                __free_page(page);
        return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;

        /*
         * zram manages data in physical block size units. Because the logical
         * block size isn't identical to the physical block size on some
         * architectures, we could get a discard request pointing to a specific
         * offset within a certain physical block.  Although we can handle this
         * request by reading that physical block and decompressing and
         * partially zeroing and re-compressing and then re-storing it, this
         * isn't reasonable because our intent with a discard request is to
         * save memory.  So skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                zram_slot_lock(zram, index);
                zram_free_page(zram, index);
                zram_slot_unlock(zram, index);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}
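
/*
 * Worked example (hypothetical numbers, assuming 4KiB pages): a discard of
 * 12288 bytes starting 512 bytes into page N first drops the partial head
 * (n -= 3584, index = N + 1), then frees pages N + 1 and N + 2 whole, and
 * silently skips the remaining 512-byte tail. Only fully covered pages are
 * ever freed.
 */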

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, bool is_write)
{
        unsigned long start_time = jiffies;
        int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
        struct request_queue *q = zram->disk->queue;
        int ret;

        generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (!is_write) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
                flush_dcache_page(bvec->bv_page);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);

        if (unlikely(ret)) {
                if (!is_write)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
        default:
                break;
        }

        bio_for_each_segment(bvec, bio, iter) {
                struct bio_vec bv = bvec;
                unsigned int unwritten = bvec.bv_len;

                do {
                        bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
                                                        unwritten);
                        if (zram_bvec_rw(zram, &bv, index, offset,
                                        op_is_write(bio_op(bio))) < 0)
                                goto out;

                        bv.bv_offset += bv.bv_len;
                        unwritten -= bv.bv_len;

                        update_position(&index, &offset, &bv);
                } while (unwritten);
        }

        bio_endio(bio);
        return;

out:
        bio_io_error(bio);
}
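
/*
 * Splitting example (hypothetical numbers, assuming 4KiB pages): a single
 * 4096-byte bvec whose target starts 2048 bytes into page N is clipped to
 * PAGE_SIZE - offset = 2048 bytes for the first zram_bvec_rw() call, then
 * update_position() advances to page N + 1, offset 0, and the remaining
 * 2048 bytes go out as a second call. No call ever crosses a page boundary.
 */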

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        return BLK_QC_T_NONE;

error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;

        zram_slot_lock(zram, index);
        zram_free_page(zram, index);
        zram_slot_unlock(zram, index);
        atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, bool is_write)
{
        int offset, err = -EIO;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                err = -EINVAL;
                goto out;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, is_write);
out:
        /*
         * If I/O fails, just return the error (i.e., non-zero) without
         * calling page_endio.
         * This causes the upper callers of rw_page (e.g., swap_readpage,
         * __swap_writepage) to resubmit the I/O as a bio request, and
         * bio->bi_end_io then handles the error
         * (e.g., SetPageError, set_page_dirty and extra work).
         */
        if (err == 0)
                page_endio(page, is_write, 0);
        return err;
}

static void zram_reset_device(struct zram *zram)
{
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        comp = zram->comp;
        disksize = zram->disksize;
        zram->disksize = 0;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* I/O operations on all CPUs are done, so it is safe to free */
        zram_meta_free(zram, disksize);
        memset(&zram->stats, 0, sizeof(zram->stats));
        zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_unlock;
        }

        disksize = PAGE_ALIGN(disksize);
        if (!zram_meta_alloc(zram, disksize)) {
                err = -ENOMEM;
                goto out_unlock;
        }

        comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        zram_revalidate_disk(zram);
        up_write(&zram->init_lock);

        return len;

out_free_meta:
        zram_meta_free(zram, disksize);
out_unlock:
        up_write(&zram->init_lock);
        return err;
}
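
/*
 * Usage sketch from the shell: setting a disksize is what initializes the
 * device, so this is typically the last configuration step.
 *
 *     echo lz4 > /sys/block/zram0/comp_algorithm   # optional, before init
 *     echo 1G  > /sys/block/zram0/disksize
 *     mkswap /dev/zram0 && swapon /dev/zram0       # e.g. use it as swap
 */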

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        zram_revalidate_disk(zram);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}
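
/*
 * Usage sketch from the shell: resetting returns the device to the
 * uninitialized state (disksize 0) and frees all of its memory. The
 * device must not be in use (e.g. swapoff/umount it first):
 *
 *     swapoff /dev/zram0
 *     echo 1 > /sys/block/zram0/reset
 */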

static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed for reset, so the open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}

static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_compact.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        &dev_attr_debug_stat.attr,
        NULL,
};

static const struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

/*
 * Allocate and initialize a new zram device. The function returns a
 * device_id >= 0 on success, and a negative value otherwise.
 */
static int zram_add(void)
{
        struct zram *zram;
        struct request_queue *queue;
        int ret, device_id;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
        if (!zram)
                return -ENOMEM;

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out_free_dev;
        device_id = ret;

        init_rwsem(&zram->init_lock);

        queue = blk_alloc_queue(GFP_KERNEL);
        if (!queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_idr;
        }

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_err("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE-aligned
         * and n*PAGE_SIZE-sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        /*
         * zram_bio_discard() will clear all logical blocks if the logical
         * block size is identical to the physical block size (PAGE_SIZE).
         * But if it is different, we will skip discarding some parts of
         * logical blocks in the part of the request range which isn't
         * aligned to the physical block size.  So we can't ensure that
         * all discarded logical blocks are zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_err("Error creating sysfs group for device %d\n",
                                device_id);
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(queue);
out_free_idr:
        idr_remove(&zram_index_idr, device_id);
out_free_dev:
        kfree(zram);
        return ret;
}

static int zram_remove(struct zram *zram)
{
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /*
         * Remove sysfs first, so no one will perform a disksize
         * store while we destroy the devices. This also helps during
         * hot_remove -- zram_reset_device() is the last holder of
         * ->init_lock, no later/concurrent disksize_store() or any
         * other sysfs handlers are possible.
         */
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        bdput(bdev);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        blk_cleanup_queue(zram->disk->queue);
        del_gendisk(zram->disk);
        put_disk(zram->disk);
        kfree(zram);
        return 0;
}

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add is not the usual read-only sysfs attribute, in the sense
 * that reading from this file does alter the state of your system -- it
 * creates a new uninitialized zram device and returns that device's
 * device_id (or an error code if it fails to create a new device).
 */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)
{
        int ret;

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        if (ret < 0)
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static CLASS_ATTR_RO(hot_add);

static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct zram *zram;
        int ret, dev_id;

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);
        if (ret)
                return ret;
        if (dev_id < 0)
                return -EINVAL;

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
        if (zram) {
                ret = zram_remove(zram);
                if (!ret)
                        idr_remove(&zram_index_idr, dev_id);
        } else {
                ret = -ENODEV;
        }

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
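
/*
 * Usage sketch from the shell: hot_add is read to create a device,
 * hot_remove is written with the device id to destroy one.
 *
 *     cat /sys/class/zram-control/hot_add     # prints e.g. "1" -> /dev/zram1
 *     echo 1 > /sys/class/zram-control/hot_remove
 */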

static struct attribute *zram_control_class_attrs[] = {
        &class_attr_hot_add.attr,
        &class_attr_hot_remove.attr,
        NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_groups   = zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
        zram_remove(ptr);
        return 0;
}

static void destroy_devices(void)
{
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
        cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
                                      zcomp_cpu_up_prepare, zcomp_cpu_dead);
        if (ret < 0)
                return ret;

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_err("Unable to register zram-control class\n");
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return ret;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_err("Unable to get major number\n");
                class_unregister(&zram_control_class);
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return -EBUSY;
        }

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);
                if (ret < 0)
                        goto out_error;
                num_devices--;
        }

        return 0;

out_error:
        destroy_devices();
        return ret;
}

static void __exit zram_exit(void)
{
        destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
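
/*
 * Usage sketch: num_devices controls how many devices are pre-created at
 * load time; more can be added later via /sys/class/zram-control/hot_add.
 *
 *     modprobe zram num_devices=4    # creates /dev/zram0 .. /dev/zram3
 */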

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");