/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

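/*
 * Generate a read-only sysfs attribute that prints one 64-bit counter from
 * zram->stats.  For example, ZRAM_ATTR_RO(num_reads) below expands to
 * zram_attr_num_reads_show() and dev_attr_num_reads, which back
 * /sys/block/zram<id>/num_reads.
 */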
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

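/*
 * meta->table[index].value packs the compressed object size into the low
 * ZRAM_FLAG_SHIFT bits and the zram_pageflags (including the ZRAM_ACCESS
 * bit used as a per-entry bit_spinlock) into the bits above it.
 */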
/* flag operations need to hold the table entry's ZRAM_ACCESS bit_spinlock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		(ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

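/* Return 1 if the page consists entirely of zero bytes, 0 otherwise. */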
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate that this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

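/*
 * Decompress the object stored at @index into the page-sized buffer @mem.
 * Unallocated or zero-filled entries simply yield a cleared page; objects
 * stored uncompressed (size == PAGE_SIZE) are copied as-is.
 */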
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

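/*
 * Read the data backing @bvec.  Partial (sub-page) reads decompress the page
 * into a temporary buffer and copy out only the requested range; full-page
 * reads decompress directly into the target page.
 */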
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

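/*
 * Write the data in @bvec at @index.  Partial writes first decompress the
 * existing page and merge in the new bytes (read-modify-write).  Zero-filled
 * pages are recorded with ZRAM_ZERO and consume no zsmalloc memory; anything
 * else is compressed and stored via zs_malloc(), falling back to storing the
 * page uncompressed when it does not compress below max_zpage_size.
 */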
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		index++;
		n -= PAGE_SIZE;
	}
}

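/*
 * Tear down an initialized device: free every stored object, destroy the
 * compression backend and the metadata, and clear the statistics.  When
 * @reset_capacity is true the gendisk capacity is also set back to zero.
 */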
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

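/*
 * Set the (page-aligned) device size and allocate metadata plus the
 * compression backend.  The size is parsed with memparse(), so suffixed
 * values work from userspace, e.g. "echo 512M > /sys/block/zram0/disksize".
 */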
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

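/*
 * Walk the bio segment by segment.  A bio_vec that straddles a PAGE_SIZE
 * boundary is split in two, since zram_bvec_rw() only operates on a single
 * zram page at a time.
 */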
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

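/*
 * Called via block_device_operations->swap_slot_free_notify when the swap
 * layer frees a slot backed by this device, so the compressed copy can be
 * dropped right away instead of lingering until the slot is overwritten.
 */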
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

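/*
 * Allocate the request queue and gendisk for one zram device, register it
 * with zero capacity (the real size is set later through the disksize
 * attribute) and create its sysfs attribute group.
 */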
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size (PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");