zram: move comp allocation out of init_lock
drivers/block/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

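/*
 * ZRAM_ATTR_RO(name) generates a sysfs show handler that prints the
 * 64-bit counter zram->stats.<name> and wraps it in a read-only
 * device attribute (dev_attr_<name>). See the ZRAM_ATTR_RO() users
 * near the bottom of this file.
 */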
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return sprintf(b, "%llu\n",					\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return sprintf(buf, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

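/*
 * max_comp_streams limits the number of concurrent compression streams
 * the zcomp backend may use. It can be tuned at run time, e.g.:
 *
 *	echo 3 > /sys/block/zram0/max_comp_streams
 */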
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return sprintf(buf, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);

	if (kstrtoint(buf, 0, &num))
		return -EINVAL;
	if (num < 1)
		return -EINVAL;
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (zcomp_set_max_streams(zram->comp, num))
			pr_info("Cannot change max compression streams\n");
	}
	zram->max_comp_streams = num;
	up_write(&zram->init_lock);
	return len;
}

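/*
 * comp_algorithm selects the compression backend for a device that has
 * not been initialized yet (i.e. before disksize is set), e.g.:
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm
 */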
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
			(ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

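/*
 * Allocate per-device metadata: one table entry (handle + size + flags)
 * per page of disksize, plus the zsmalloc pool that backs the
 * compressed data.
 */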
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

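/*
 * Advance (index, offset) past the bio_vec that was just processed;
 * crossing a PAGE_SIZE boundary moves us to the next zram page.
 */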
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock with write-side */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

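/*
 * Read one bio_vec worth of data. A full-page read decompresses
 * straight into the caller's page; a partial read decompresses into a
 * temporary buffer first and then copies out just the requested range.
 */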
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

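/*
 * Write one bio_vec worth of data. The path is: read-modify-write for
 * partial pages, zero-page detection, compression through a per-stream
 * zcomp buffer, allocation from the zsmalloc pool, and finally a table
 * update under the write side of meta->tb_lock. Pages that compress
 * poorly (clen > max_zpage_size) are stored uncompressed.
 */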
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
				bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	return ret;
}

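/*
 * Tear the device down to its pre-disksize state: free every compressed
 * page, destroy the compression backend and the metadata, and clear the
 * stats. reset_capacity is false only on module exit, where the gendisk
 * has already been released by destroy_device().
 */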
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

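/*
 * Set the (page-aligned) disksize and bring the device up, e.g.:
 *
 *	echo 1G > /sys/block/zram0/disksize
 *
 * Note that both zram_meta_alloc() and zcomp_create() run before
 * init_lock is taken, so the potentially slow, sleeping allocations of
 * the metadata and the compression backend happen outside the lock
 * (this is the point of the commit named in the title above).
 */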
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err = -EINVAL;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (!comp) {
		pr_info("Cannot initialise %s compressing backend\n",
			zram->compressor);
		goto out_cleanup;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_cleanup;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	return len;

out_cleanup:
	if (comp)
		zcomp_destroy(comp);
	zram_meta_free(meta);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

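/*
 * Walk the bio segment by segment; a bio_vec that straddles a zram page
 * boundary is split in two so that zram_bvec_rw() always operates
 * within a single page.
 */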
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

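/*
 * Called by the swap subsystem when a swap slot on this device is
 * freed, so the compressed copy can be dropped immediately instead of
 * lingering until it is overwritten.
 */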
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

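/*
 * Example: load the module with four devices (zram0..zram3):
 *
 *	modprobe zram num_devices=4
 */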
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");