Commit | Line | Data |
---|---|---|
306b0c95 | 1 | /* |
f1e3cfff | 2 | * Compressed RAM block device |
306b0c95 | 3 | * |
1130ebba | 4 | * Copyright (C) 2008, 2009, 2010 Nitin Gupta |
306b0c95 NG | 5 | * |
6 | * This code is released using a dual license strategy: BSD/GPL | |
7 | * You can choose the licence that better fits your requirements. | |
8 | * | |
9 | * Released under the terms of 3-clause BSD License | |
10 | * Released under the terms of GNU General Public License Version 2.0 | |
11 | * | |
12 | * Project home: http://compcache.googlecode.com | |
13 | */ | |
14 | ||
f1e3cfff | 15 | #define KMSG_COMPONENT "zram" |
306b0c95 NG | 16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
17 | ||
b1f5b81e RJ | 18 | #ifdef CONFIG_ZRAM_DEBUG |
19 | #define DEBUG | |
20 | #endif | |
21 | ||
306b0c95 NG | 22 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | |
8946a086 | 24 | #include <linux/bio.h> |
306b0c95 NG | 25 | #include <linux/bitops.h> |
26 | #include <linux/blkdev.h> | |
27 | #include <linux/buffer_head.h> | |
28 | #include <linux/device.h> | |
29 | #include <linux/genhd.h> | |
30 | #include <linux/highmem.h> | |
5a0e3ad6 | 31 | #include <linux/slab.h> |
306b0c95 | 32 | #include <linux/lzo.h> |
306b0c95 | 33 | #include <linux/string.h> |
306b0c95 | 34 | #include <linux/vmalloc.h> |
306b0c95 | 35 | |
16a4bfb9 | 36 | #include "zram_drv.h" |
306b0c95 NG | 37 | |
38 | /* Globals */ | |
f1e3cfff | 39 | static int zram_major; |
0f0e3ba3 | 40 | static struct zram *zram_devices; |
306b0c95 | 41 | |
306b0c95 | 42 | /* Module params (documentation at end) */ |
ca3d70bd | 43 | static unsigned int num_devices = 1; |
33863c21 | 44 | |
9b3bb7ab SS | 45 | static inline struct zram *dev_to_zram(struct device *dev) |
46 | { | |
47 | return (struct zram *)dev_to_disk(dev)->private_data; | |
48 | } | |
49 | ||
50 | static ssize_t disksize_show(struct device *dev, | |
51 | struct device_attribute *attr, char *buf) | |
52 | { | |
53 | struct zram *zram = dev_to_zram(dev); | |
54 | ||
55 | return sprintf(buf, "%llu\n", zram->disksize); | |
56 | } | |
57 | ||
58 | static ssize_t initstate_show(struct device *dev, | |
59 | struct device_attribute *attr, char *buf) | |
60 | { | |
61 | struct zram *zram = dev_to_zram(dev); | |
62 | ||
63 | return sprintf(buf, "%u\n", zram->init_done); | |
64 | } | |
65 | ||
66 | static ssize_t num_reads_show(struct device *dev, | |
67 | struct device_attribute *attr, char *buf) | |
68 | { | |
69 | struct zram *zram = dev_to_zram(dev); | |
70 | ||
71 | return sprintf(buf, "%llu\n", | |
72 | (u64)atomic64_read(&zram->stats.num_reads)); | |
73 | } | |
74 | ||
75 | static ssize_t num_writes_show(struct device *dev, | |
76 | struct device_attribute *attr, char *buf) | |
77 | { | |
78 | struct zram *zram = dev_to_zram(dev); | |
79 | ||
80 | return sprintf(buf, "%llu\n", | |
81 | (u64)atomic64_read(&zram->stats.num_writes)); | |
82 | } | |
83 | ||
84 | static ssize_t invalid_io_show(struct device *dev, | |
85 | struct device_attribute *attr, char *buf) | |
86 | { | |
87 | struct zram *zram = dev_to_zram(dev); | |
88 | ||
89 | return sprintf(buf, "%llu\n", | |
90 | (u64)atomic64_read(&zram->stats.invalid_io)); | |
91 | } | |
92 | ||
93 | static ssize_t notify_free_show(struct device *dev, | |
94 | struct device_attribute *attr, char *buf) | |
95 | { | |
96 | struct zram *zram = dev_to_zram(dev); | |
97 | ||
98 | return sprintf(buf, "%llu\n", | |
99 | (u64)atomic64_read(&zram->stats.notify_free)); | |
100 | } | |
101 | ||
102 | static ssize_t zero_pages_show(struct device *dev, | |
103 | struct device_attribute *attr, char *buf) | |
104 | { | |
105 | struct zram *zram = dev_to_zram(dev); | |
106 | ||
107 | return sprintf(buf, "%u\n", zram->stats.pages_zero); | |
108 | } | |
109 | ||
110 | static ssize_t orig_data_size_show(struct device *dev, | |
111 | struct device_attribute *attr, char *buf) | |
112 | { | |
113 | struct zram *zram = dev_to_zram(dev); | |
114 | ||
115 | return sprintf(buf, "%llu\n", | |
116 | (u64)(zram->stats.pages_stored) << PAGE_SHIFT); | |
117 | } | |
118 | ||
119 | static ssize_t compr_data_size_show(struct device *dev, | |
120 | struct device_attribute *attr, char *buf) | |
121 | { | |
122 | struct zram *zram = dev_to_zram(dev); | |
123 | ||
124 | return sprintf(buf, "%llu\n", | |
125 | (u64)atomic64_read(&zram->stats.compr_size)); | |
126 | } | |
127 | ||
128 | static ssize_t mem_used_total_show(struct device *dev, | |
129 | struct device_attribute *attr, char *buf) | |
130 | { | |
131 | u64 val = 0; | |
132 | struct zram *zram = dev_to_zram(dev); | |
133 | struct zram_meta *meta = zram->meta; | |
134 | ||
135 | down_read(&zram->init_lock); | |
136 | if (zram->init_done) | |
137 | val = zs_get_total_size_bytes(meta->mem_pool); | |
138 | up_read(&zram->init_lock); | |
139 | ||
140 | return sprintf(buf, "%llu\n", val); | |
141 | } | |
142 | ||
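
The show handlers above back the read-only files under `/sys/block/zram<id>/`; the `DEVICE_ATTR` wiring appears near the end of this file. A minimal userspace sketch of reading them follows; this is an illustration only, assuming the module is loaded and a `zram0` device exists:

```c
/* Userspace sketch: read zram sysfs counters. Assumes the zram module
 * is loaded and zram0 exists; not part of the kernel source.
 */
#include <stdio.h>

static long long read_stat(const char *attr)
{
	char path[128];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/zram0/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("orig_data_size:  %lld\n", read_stat("orig_data_size"));
	printf("compr_data_size: %lld\n", read_stat("compr_data_size"));
	printf("mem_used_total:  %lld\n", read_stat("mem_used_total"));
	return 0;
}
```
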
8b3cc3ed | 143 | static int zram_test_flag(struct zram_meta *meta, u32 index, |
f1e3cfff | 144 | enum zram_pageflags flag) |
306b0c95 | 145 | { |
8b3cc3ed | 146 | return meta->table[index].flags & BIT(flag); |
306b0c95 NG | 147 | } |
148 | ||
8b3cc3ed | 149 | static void zram_set_flag(struct zram_meta *meta, u32 index, |
f1e3cfff | 150 | enum zram_pageflags flag) |
306b0c95 | 151 | { |
8b3cc3ed | 152 | meta->table[index].flags |= BIT(flag); |
306b0c95 NG | 153 | } |
154 | ||
8b3cc3ed | 155 | static void zram_clear_flag(struct zram_meta *meta, u32 index, |
f1e3cfff | 156 | enum zram_pageflags flag) |
306b0c95 | 157 | { |
8b3cc3ed | 158 | meta->table[index].flags &= ~BIT(flag); |
306b0c95 NG | 159 | } |
160 | ||
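
The three helpers above are thin wrappers around `BIT()` arithmetic on the per-slot `flags` word. A standalone sketch of the same idiom, with toy names (the real `enum zram_pageflags` lives in zram_drv.h):

```c
/* Standalone illustration of the BIT() set/test/clear idiom used by
 * zram_set_flag()/zram_test_flag()/zram_clear_flag(). Toy types only.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(n)	(1UL << (n))

enum toy_pageflags { TOY_UNCOMPRESSED, TOY_ZERO };

struct toy_entry { unsigned long flags; };

int main(void)
{
	struct toy_entry e = { 0 };

	e.flags |= BIT(TOY_ZERO);		/* set   */
	assert(e.flags & BIT(TOY_ZERO));	/* test  */
	e.flags &= ~BIT(TOY_ZERO);		/* clear */
	assert(!(e.flags & BIT(TOY_ZERO)));
	printf("flag round-trip ok\n");
	return 0;
}
```
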
9b3bb7ab SS | 161 | static inline int is_partial_io(struct bio_vec *bvec) |
162 | { | |
163 | return bvec->bv_len != PAGE_SIZE; | |
164 | } | |
165 | ||
166 | /* | |
167 | * Check if request is within bounds and aligned on zram logical blocks. | |
168 | */ | |
169 | static inline int valid_io_request(struct zram *zram, struct bio *bio) | |
170 | { | |
171 | u64 start, end, bound; | |
a539c72a | 172 | |
9b3bb7ab SS | 173 | /* unaligned request */ |
174 | if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) | |
175 | return 0; | |
176 | if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) | |
177 | return 0; | |
178 | ||
179 | start = bio->bi_sector; | |
180 | end = start + (bio->bi_size >> SECTOR_SHIFT); | |
181 | bound = zram->disksize >> SECTOR_SHIFT; | |
182 | /* out of range */ |
75c7caf5 | 183 | if (unlikely(start >= bound || end > bound || start > end)) |
9b3bb7ab SS | 184 | return 0; |
185 | ||
186 | /* I/O request is valid */ | |
187 | return 1; | |
188 | } | |
189 | ||
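
`valid_io_request()` relies on power-of-two masks: with 512-byte sectors and the usual 4 KiB zram logical block, `ZRAM_SECTOR_PER_LOGICAL_BLOCK` is 8, so `bi_sector & 7` rejects unaligned starts. A worked sketch with those assumed constants (the overflow guard `start > end` is omitted for brevity):

```c
/* Worked example of the alignment/bounds checks, assuming
 * SECTOR_SHIFT = 9 and a 4 KiB zram logical block.
 */
#include <stdio.h>

#define SECTOR_SHIFT			9
#define ZRAM_LOGICAL_BLOCK_SIZE		4096ULL
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK	(ZRAM_LOGICAL_BLOCK_SIZE >> SECTOR_SHIFT)

static int valid(unsigned long long sector, unsigned long long bytes,
		 unsigned long long disksize)
{
	unsigned long long bound = disksize >> SECTOR_SHIFT;
	unsigned long long end = sector + (bytes >> SECTOR_SHIFT);

	if (sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))
		return 0;	/* start not on a logical-block boundary */
	if (bytes & (ZRAM_LOGICAL_BLOCK_SIZE - 1))
		return 0;	/* length not a whole number of blocks */
	return sector < bound && end <= bound;
}

int main(void)
{
	unsigned long long disksize = 1ULL << 20;	/* 1 MiB device */

	printf("%d\n", valid(0, 4096, disksize));	/* 1: aligned, in range */
	printf("%d\n", valid(3, 4096, disksize));	/* 0: unaligned start   */
	printf("%d\n", valid(2048, 4096, disksize));	/* 0: past end of disk  */
	return 0;
}
```
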
190 | static void zram_meta_free(struct zram_meta *meta) | |
191 | { | |
192 | zs_destroy_pool(meta->mem_pool); | |
193 | kfree(meta->compress_workmem); | |
194 | free_pages((unsigned long)meta->compress_buffer, 1); | |
195 | vfree(meta->table); | |
196 | kfree(meta); | |
197 | } | |
198 | ||
199 | static struct zram_meta *zram_meta_alloc(u64 disksize) | |
200 | { | |
201 | size_t num_pages; | |
202 | struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); | |
203 | if (!meta) | |
204 | goto out; | |
205 | ||
206 | meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); | |
207 | if (!meta->compress_workmem) | |
208 | goto free_meta; | |
209 | ||
210 | meta->compress_buffer = | |
211 | (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); | |
212 | if (!meta->compress_buffer) { | |
213 | pr_err("Error allocating compressor buffer space\n"); | |
214 | goto free_workmem; | |
215 | } | |
216 | ||
217 | num_pages = disksize >> PAGE_SHIFT; | |
218 | meta->table = vzalloc(num_pages * sizeof(*meta->table)); | |
219 | if (!meta->table) { | |
220 | pr_err("Error allocating zram address table\n"); | |
221 | goto free_buffer; | |
222 | } | |
223 | ||
224 | meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM); | |
225 | if (!meta->mem_pool) { | |
226 | pr_err("Error creating memory pool\n"); | |
227 | goto free_table; | |
228 | } | |
229 | ||
230 | return meta; | |
231 | ||
232 | free_table: | |
233 | vfree(meta->table); | |
234 | free_buffer: | |
235 | free_pages((unsigned long)meta->compress_buffer, 1); | |
236 | free_workmem: | |
237 | kfree(meta->compress_workmem); | |
238 | free_meta: | |
239 | kfree(meta); | |
240 | meta = NULL; | |
241 | out: | |
242 | return meta; | |
243 | } | |
244 | ||
245 | static void update_position(u32 *index, int *offset, struct bio_vec *bvec) | |
246 | { | |
247 | if (*offset + bvec->bv_len >= PAGE_SIZE) | |
248 | (*index)++; | |
249 | *offset = (*offset + bvec->bv_len) % PAGE_SIZE; | |
250 | } | |
251 | ||
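
`update_position()` advances the (page index, intra-page offset) cursor after each segment; the `>=` makes a segment that ends exactly on a page boundary bump the index, while the modulo resets the offset to zero. A trace with an assumed 4 KiB page:

```c
/* Trace of update_position()'s cursor arithmetic, assuming 4 KiB
 * pages; bv_len stands in for bio_vec.bv_len.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static void update_position(unsigned int *index, unsigned int *offset,
			    unsigned int bv_len)
{
	if (*offset + bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bv_len) % PAGE_SIZE;
}

int main(void)
{
	unsigned int index = 0, offset = 0;

	update_position(&index, &offset, 1024);		/* stays mid-page    */
	printf("index=%u offset=%u\n", index, offset);	/* -> index=0, 1024  */
	update_position(&index, &offset, 3072);		/* hits the boundary */
	printf("index=%u offset=%u\n", index, offset);	/* -> index=1, 0     */
	return 0;
}
```
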
306b0c95 NG | 252 | static int page_zero_filled(void *ptr) |
253 | { | |
254 | unsigned int pos; | |
255 | unsigned long *page; | |
256 | ||
257 | page = (unsigned long *)ptr; | |
258 | ||
259 | for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) { | |
260 | if (page[pos]) | |
261 | return 0; | |
262 | } | |
263 | ||
264 | return 1; | |
265 | } | |
266 | ||
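
`page_zero_filled()` scans the page one `unsigned long` at a time, so a 4 KiB page costs 512 compares on a 64-bit machine rather than 4096 byte compares. The same loop, compilable stand-alone:

```c
/* Userspace copy of the word-wise zero scan in page_zero_filled(),
 * with an assumed 4 KiB page held in a static buffer.
 */
#include <stdio.h>

#define PAGE_SIZE 4096

static int page_zero_filled(void *ptr)
{
	unsigned long *page = ptr;
	unsigned int pos;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}
	return 1;
}

int main(void)
{
	static unsigned long buf[PAGE_SIZE / sizeof(unsigned long)];

	printf("%d\n", page_zero_filled(buf));	/* 1: all zero     */
	buf[100] = 1;
	printf("%d\n", page_zero_filled(buf));	/* 0: dirtied word */
	return 0;
}
```
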
9b3bb7ab SS | 267 | static void handle_zero_page(struct bio_vec *bvec) |
268 | { | |
269 | struct page *page = bvec->bv_page; | |
270 | void *user_mem; | |
271 | ||
272 | user_mem = kmap_atomic(page); | |
273 | if (is_partial_io(bvec)) | |
274 | memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); | |
275 | else | |
276 | clear_page(user_mem); | |
277 | kunmap_atomic(user_mem); | |
278 | ||
279 | flush_dcache_page(page); | |
280 | } | |
281 | ||
f1e3cfff | 282 | static void zram_free_page(struct zram *zram, size_t index) |
306b0c95 | 283 | { |
8b3cc3ed MK | 284 | struct zram_meta *meta = zram->meta; |
285 | unsigned long handle = meta->table[index].handle; | |
286 | u16 size = meta->table[index].size; | |
306b0c95 | 287 | |
fd1a30de | 288 | if (unlikely(!handle)) { |
2e882281 NG | 289 | /* |
290 | * No memory is allocated for zero filled pages. | |
291 | * Simply clear zero page flag. | |
292 | */ | |
8b3cc3ed MK | 293 | if (zram_test_flag(meta, index, ZRAM_ZERO)) { |
294 | zram_clear_flag(meta, index, ZRAM_ZERO); | |
d178a07c | 295 | zram->stats.pages_zero--; |
306b0c95 NG | 296 | } |
297 | return; | |
298 | } | |
299 | ||
130f315a | 300 | if (unlikely(size > max_zpage_size)) |
d178a07c | 301 | zram->stats.bad_compress--; |
306b0c95 | 302 | |
8b3cc3ed | 303 | zs_free(meta->mem_pool, handle); |
306b0c95 | 304 | |
130f315a | 305 | if (size <= PAGE_SIZE / 2) |
d178a07c | 306 | zram->stats.good_compress--; |
306b0c95 | 307 | |
da5cc7d3 | 308 | atomic64_sub(meta->table[index].size, &zram->stats.compr_size); |
d178a07c | 309 | zram->stats.pages_stored--; |
306b0c95 | 310 | |
8b3cc3ed MK | 311 | meta->table[index].handle = 0; |
312 | meta->table[index].size = 0; | |
306b0c95 NG | 313 | } |
314 | ||
37b51fdd | 315 | static int zram_decompress_page(struct zram *zram, char *mem, u32 index) |
306b0c95 | 316 | { |
37b51fdd SS | 317 | int ret = LZO_E_OK; |
318 | size_t clen = PAGE_SIZE; | |
319 | unsigned char *cmem; | |
8b3cc3ed MK | 320 | struct zram_meta *meta = zram->meta; |
321 | unsigned long handle = meta->table[index].handle; | |
306b0c95 | 322 | |
8b3cc3ed | 323 | if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { |
42e99bd9 | 324 | clear_page(mem); |
8c921b2b JM | 325 | return 0; |
326 | } | |
306b0c95 | 327 | |
8b3cc3ed MK | 328 | cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); |
329 | if (meta->table[index].size == PAGE_SIZE) | |
42e99bd9 | 330 | copy_page(mem, cmem); |
37b51fdd | 331 | else |
8b3cc3ed | 332 | ret = lzo1x_decompress_safe(cmem, meta->table[index].size, |
37b51fdd | 333 | mem, &clen); |
8b3cc3ed | 334 | zs_unmap_object(meta->mem_pool, handle); |
a1dd52af | 335 | |
8c921b2b JM | 336 | /* Should NEVER happen. Return bio error if it does. */ |
337 | if (unlikely(ret != LZO_E_OK)) { | |
338 | pr_err("Decompression failed! err=%d, page=%u\n", ret, index); | |
da5cc7d3 | 339 | atomic64_inc(&zram->stats.failed_reads); |
8c921b2b | 340 | return ret; |
a1dd52af | 341 | } |
306b0c95 | 342 | |
8c921b2b | 343 | return 0; |
306b0c95 NG | 344 | } |
345 | ||
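
The read path either copies a page stored verbatim (`size == PAGE_SIZE`) or runs `lzo1x_decompress_safe()`, with `clen` passed in as the output capacity and returned as the produced length. Below is a hedged userspace round trip of the same pair of calls, assuming liblzo2 is installed (build with `-llzo2`); the in-kernel `lzo1x_*` functions mirror these signatures:

```c
/* Round-trip sketch of the LZO calls used by the read/write paths.
 * Assumes liblzo2 (lzo/lzo1x.h); not the kernel implementation.
 */
#include <lzo/lzo1x.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
	static unsigned char page[PAGE_SIZE], back[PAGE_SIZE];
	static unsigned char cbuf[2 * PAGE_SIZE];	/* worst case fits */
	static unsigned char wrk[LZO1X_1_MEM_COMPRESS];
	lzo_uint clen = sizeof(cbuf), dlen = PAGE_SIZE;

	if (lzo_init() != LZO_E_OK)
		return 1;
	memset(page, 'z', PAGE_SIZE);			/* very compressible */

	if (lzo1x_1_compress(page, PAGE_SIZE, cbuf, &clen, wrk) != LZO_E_OK)
		return 1;
	printf("compressed %d -> %lu bytes\n", PAGE_SIZE, (unsigned long)clen);

	if (lzo1x_decompress_safe(cbuf, clen, back, &dlen, NULL) != LZO_E_OK)
		return 1;
	return memcmp(page, back, PAGE_SIZE) != 0;	/* exit 0 on success */
}
```
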
37b51fdd SS | 346 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
347 | u32 index, int offset, struct bio *bio) | |
924bd88d JM | 348 | { |
349 | int ret; | |
37b51fdd SS | 350 | struct page *page; |
351 | unsigned char *user_mem, *uncmem = NULL; | |
8b3cc3ed | 352 | struct zram_meta *meta = zram->meta; |
37b51fdd SS | 353 | page = bvec->bv_page; |
354 | ||
8b3cc3ed MK | 355 | if (unlikely(!meta->table[index].handle) || |
356 | zram_test_flag(meta, index, ZRAM_ZERO)) { | |
37b51fdd | 357 | handle_zero_page(bvec); |
924bd88d JM | 358 | return 0; |
359 | } | |
360 | ||
37b51fdd SS | 361 | if (is_partial_io(bvec)) |
362 | /* Use a temporary buffer to decompress the page */ | |
7e5a5104 MK | 363 | uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); |
364 | ||
365 | user_mem = kmap_atomic(page); | |
366 | if (!is_partial_io(bvec)) | |
37b51fdd SS | 367 | uncmem = user_mem; |
368 | ||
369 | if (!uncmem) { | |
370 | pr_info("Unable to allocate temp memory\n"); | |
371 | ret = -ENOMEM; | |
372 | goto out_cleanup; | |
373 | } | |
924bd88d | 374 | |
37b51fdd | 375 | ret = zram_decompress_page(zram, uncmem, index); |
924bd88d | 376 | /* Should NEVER happen. Return bio error if it does. */ |
25eeb667 | 377 | if (unlikely(ret != LZO_E_OK)) |
37b51fdd | 378 | goto out_cleanup; |
924bd88d | 379 | |
37b51fdd SS | 380 | if (is_partial_io(bvec)) |
381 | memcpy(user_mem + bvec->bv_offset, uncmem + offset, | |
382 | bvec->bv_len); | |
383 | ||
384 | flush_dcache_page(page); | |
385 | ret = 0; | |
386 | out_cleanup: | |
387 | kunmap_atomic(user_mem); | |
388 | if (is_partial_io(bvec)) | |
389 | kfree(uncmem); | |
390 | return ret; | |
924bd88d JM | 391 | } |
392 | ||
393 | static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, | |
394 | int offset) | |
306b0c95 | 395 | { |
397c6066 | 396 | int ret = 0; |
8c921b2b | 397 | size_t clen; |
c2344348 | 398 | unsigned long handle; |
130f315a | 399 | struct page *page; |
924bd88d | 400 | unsigned char *user_mem, *cmem, *src, *uncmem = NULL; |
8b3cc3ed | 401 | struct zram_meta *meta = zram->meta; |
306b0c95 | 402 | |
8c921b2b | 403 | page = bvec->bv_page; |
8b3cc3ed | 404 | src = meta->compress_buffer; |
306b0c95 | 405 | |
924bd88d JM | 406 | if (is_partial_io(bvec)) { |
407 | /* | |
408 | * This is a partial I/O. We need to read the full page |
409 | * before writing the changes. |
410 | */ | |
7e5a5104 | 411 | uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); |
924bd88d | 412 | if (!uncmem) { |
924bd88d JM | 413 | ret = -ENOMEM; |
414 | goto out; | |
415 | } | |
37b51fdd | 416 | ret = zram_decompress_page(zram, uncmem, index); |
397c6066 | 417 | if (ret) |
924bd88d | 418 | goto out; |
924bd88d JM | 419 | } |
420 | ||
ba82fe2e | 421 | user_mem = kmap_atomic(page); |
924bd88d | 422 | |
397c6066 | 423 | if (is_partial_io(bvec)) { |
924bd88d JM | 424 | memcpy(uncmem + offset, user_mem + bvec->bv_offset, |
425 | bvec->bv_len); | |
397c6066 NG | 426 | kunmap_atomic(user_mem); |
427 | user_mem = NULL; | |
428 | } else { | |
924bd88d | 429 | uncmem = user_mem; |
397c6066 | 430 | } |
924bd88d JM | 431 | |
432 | if (page_zero_filled(uncmem)) { | |
ba82fe2e | 433 | kunmap_atomic(user_mem); |
f40ac2ae SS | 434 | /* Free memory associated with this sector now. */ |
435 | zram_free_page(zram, index); | |
436 | ||
d178a07c | 437 | zram->stats.pages_zero++; |
8b3cc3ed | 438 | zram_set_flag(meta, index, ZRAM_ZERO); |
924bd88d JM | 439 | ret = 0; |
440 | goto out; | |
8c921b2b | 441 | } |
306b0c95 | 442 | |
a0c516cb MK | 443 | /* |
444 | * zram_slot_free_notify could have missed a free, so |
445 | * double check before overwriting the slot. |
446 | */ |
447 | if (unlikely(meta->table[index].handle || | |
448 | zram_test_flag(meta, index, ZRAM_ZERO))) | |
449 | zram_free_page(zram, index); | |
450 | ||
924bd88d | 451 | ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen, |
8b3cc3ed | 452 | meta->compress_workmem); |
306b0c95 | 453 | |
397c6066 NG | 454 | if (!is_partial_io(bvec)) { |
455 | kunmap_atomic(user_mem); | |
456 | user_mem = NULL; | |
457 | uncmem = NULL; | |
458 | } | |
306b0c95 | 459 | |
8c921b2b | 460 | if (unlikely(ret != LZO_E_OK)) { |
8c921b2b | 461 | pr_err("Compression failed! err=%d\n", ret); |
924bd88d | 462 | goto out; |
8c921b2b | 463 | } |
306b0c95 | 464 | |
c8f2f0db | 465 | if (unlikely(clen > max_zpage_size)) { |
d178a07c | 466 | zram->stats.bad_compress++; |
c8f2f0db | 467 | clen = PAGE_SIZE; |
397c6066 NG | 468 | src = NULL; |
469 | if (is_partial_io(bvec)) | |
470 | src = uncmem; | |
c8f2f0db | 471 | } |
a1dd52af | 472 | |
8b3cc3ed | 473 | handle = zs_malloc(meta->mem_pool, clen); |
fd1a30de | 474 | if (!handle) { |
596b3dd4 MR | 475 | pr_info("Error allocating memory for compressed page: %u, size=%zu\n", |
476 | index, clen); | |
924bd88d JM | 477 | ret = -ENOMEM; |
478 | goto out; | |
8c921b2b | 479 | } |
8b3cc3ed | 480 | cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO); |
306b0c95 | 481 | |
42e99bd9 | 482 | if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { |
397c6066 | 483 | src = kmap_atomic(page); |
42e99bd9 | 484 | copy_page(cmem, src); |
397c6066 | 485 | kunmap_atomic(src); |
42e99bd9 JL | 486 | } else { |
487 | memcpy(cmem, src, clen); | |
488 | } | |
306b0c95 | 489 | |
8b3cc3ed | 490 | zs_unmap_object(meta->mem_pool, handle); |
fd1a30de | 491 | |
f40ac2ae SS | 492 | /* |
493 | * Free memory associated with this sector | |
494 | * before overwriting unused sectors. | |
495 | */ | |
496 | zram_free_page(zram, index); | |
497 | ||
8b3cc3ed MK | 498 | meta->table[index].handle = handle; |
499 | meta->table[index].size = clen; | |
306b0c95 | 500 | |
8c921b2b | 501 | /* Update stats */ |
da5cc7d3 | 502 | atomic64_add(clen, &zram->stats.compr_size); |
d178a07c | 503 | zram->stats.pages_stored++; |
8c921b2b | 504 | if (clen <= PAGE_SIZE / 2) |
d178a07c | 505 | zram->stats.good_compress++; |
306b0c95 | 506 | |
924bd88d | 507 | out: |
397c6066 NG | 508 | if (is_partial_io(bvec)) |
509 | kfree(uncmem); | |
510 | ||
924bd88d | 511 | if (ret) |
da5cc7d3 | 512 | atomic64_inc(&zram->stats.failed_writes); |
924bd88d | 513 | return ret; |
8c921b2b JM | 514 | } |
515 | ||
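
The write path embodies two policies worth calling out: pages whose LZO output exceeds `max_zpage_size` are stored verbatim with `clen` forced to `PAGE_SIZE`, and pages compressing to at most half a page count as `good_compress`. A decision-table sketch with an assumed `max_zpage_size` of `PAGE_SIZE / 4 * 3` (its value in zram_drv.h at this point in the driver's history):

```c
/* Sketch of the store-raw-vs-compressed decision in zram_bvec_write(),
 * assuming max_zpage_size = PAGE_SIZE / 4 * 3 (see zram_drv.h).
 */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define MAX_ZPAGE_SIZE	(PAGE_SIZE / 4 * 3)	/* 3072 */

int main(void)
{
	unsigned int samples[] = { 512, 2048, 3500, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int clen = samples[i];
		int raw = clen > MAX_ZPAGE_SIZE;	/* bad_compress path */

		printf("clen=%4u -> store %s (%u bytes)%s\n",
		       clen, raw ? "raw page " : "compressed",
		       raw ? PAGE_SIZE : clen,
		       clen <= PAGE_SIZE / 2 ? ", good_compress" : "");
	}
	return 0;
}
```
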
a0c516cb MK | 516 | static void handle_pending_slot_free(struct zram *zram) |
517 | { | |
518 | struct zram_slot_free *free_rq; | |
519 | ||
520 | spin_lock(&zram->slot_free_lock); | |
521 | while (zram->slot_free_rq) { | |
522 | free_rq = zram->slot_free_rq; | |
523 | zram->slot_free_rq = free_rq->next; | |
524 | zram_free_page(zram, free_rq->index); | |
525 | kfree(free_rq); | |
526 | } | |
527 | spin_unlock(&zram->slot_free_lock); | |
528 | } | |
529 | ||
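
`handle_pending_slot_free()` drains a spinlock-protected singly linked list that `zram_slot_free_notify()` (further down) fills from atomic context; draining before any read or write keeps stale table entries from being served. A standalone sketch of the pattern, with a pthread mutex standing in for the kernel spinlock:

```c
/* Standalone model of the slot_free_rq push/drain pattern. A mutex
 * stands in for zram->slot_free_lock; printf stands in for
 * zram_free_page(). Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slot_free {
	unsigned long index;
	struct slot_free *next;
};

static struct slot_free *slot_free_rq;
static pthread_mutex_t slot_free_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_slot_free(unsigned long index)
{
	struct slot_free *rq = malloc(sizeof(*rq));

	if (!rq)
		return;	/* the driver also gives up on GFP_ATOMIC failure */
	rq->index = index;
	pthread_mutex_lock(&slot_free_lock);
	rq->next = slot_free_rq;
	slot_free_rq = rq;
	pthread_mutex_unlock(&slot_free_lock);
}

static void handle_pending_slot_free(void)
{
	pthread_mutex_lock(&slot_free_lock);
	while (slot_free_rq) {
		struct slot_free *rq = slot_free_rq;

		slot_free_rq = rq->next;
		printf("freeing slot %lu\n", rq->index);
		free(rq);
	}
	pthread_mutex_unlock(&slot_free_lock);
}

int main(void)
{
	add_slot_free(3);
	add_slot_free(7);
	handle_pending_slot_free();	/* drains LIFO: 7, then 3 */
	return 0;
}
```
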
8c921b2b | 530 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, |
924bd88d | 531 | int offset, struct bio *bio, int rw) |
8c921b2b | 532 | { |
c5bde238 | 533 | int ret; |
8c921b2b | 534 | |
c5bde238 JM | 535 | if (rw == READ) { |
536 | down_read(&zram->lock); | |
a0c516cb | 537 | handle_pending_slot_free(zram); |
c5bde238 JM | 538 | ret = zram_bvec_read(zram, bvec, index, offset, bio); |
539 | up_read(&zram->lock); | |
540 | } else { | |
541 | down_write(&zram->lock); | |
a0c516cb | 542 | handle_pending_slot_free(zram); |
c5bde238 JM | 543 | ret = zram_bvec_write(zram, bvec, index, offset); |
544 | up_write(&zram->lock); | |
545 | } | |
546 | ||
547 | return ret; | |
924bd88d JM | 548 | } |
549 | ||
2b86ab9c | 550 | static void zram_reset_device(struct zram *zram, bool reset_capacity) |
924bd88d | 551 | { |
9b3bb7ab SS | 552 | size_t index; |
553 | struct zram_meta *meta; | |
554 | ||
a0c516cb MK | 555 | flush_work(&zram->free_work); |
556 | ||
644d4787 SS |
557 | down_write(&zram->init_lock); |
558 | if (!zram->init_done) { | |
559 | up_write(&zram->init_lock); | |
9b3bb7ab | 560 | return; |
644d4787 | 561 | } |
9b3bb7ab SS | 562 | |
563 | meta = zram->meta; | |
564 | zram->init_done = 0; | |
565 | ||
566 | /* Free all pages that are still in this zram device */ | |
567 | for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) { | |
568 | unsigned long handle = meta->table[index].handle; | |
569 | if (!handle) | |
570 | continue; | |
571 | ||
572 | zs_free(meta->mem_pool, handle); | |
573 | } | |
574 | ||
575 | zram_meta_free(zram->meta); | |
576 | zram->meta = NULL; | |
577 | /* Reset stats */ | |
578 | memset(&zram->stats, 0, sizeof(zram->stats)); | |
579 | ||
580 | zram->disksize = 0; | |
2b86ab9c MK | 581 | if (reset_capacity) |
582 | set_capacity(zram->disk, 0); | |
644d4787 | 583 | up_write(&zram->init_lock); |
9b3bb7ab SS | 584 | } |
585 | ||
586 | static void zram_init_device(struct zram *zram, struct zram_meta *meta) | |
587 | { | |
588 | if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) { | |
589 | pr_info( | |
590 | "There is little point creating a zram of greater than " | |
591 | "twice the size of memory since we expect a 2:1 compression " | |
592 | "ratio. Note that zram uses about 0.1%% of the size of " | |
593 | "the disk when not in use so a huge zram is " | |
594 | "wasteful.\n" | |
595 | "\tMemory Size: %lu kB\n" | |
596 | "\tSize you selected: %llu kB\n" | |
597 | "Continuing anyway ...\n", | |
598 | (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10 | |
599 | ); | |
600 | } | |
601 | ||
602 | /* zram devices sort of resemble non-rotational disks */ |
603 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); | |
604 | ||
605 | zram->meta = meta; | |
606 | zram->init_done = 1; | |
607 | ||
608 | pr_debug("Initialization done!\n"); | |
609 | } | |
610 | ||
611 | static ssize_t disksize_store(struct device *dev, | |
612 | struct device_attribute *attr, const char *buf, size_t len) | |
613 | { | |
614 | u64 disksize; | |
615 | struct zram_meta *meta; | |
616 | struct zram *zram = dev_to_zram(dev); | |
617 | ||
618 | disksize = memparse(buf, NULL); | |
619 | if (!disksize) | |
620 | return -EINVAL; | |
621 | ||
622 | disksize = PAGE_ALIGN(disksize); | |
623 | meta = zram_meta_alloc(disksize); | |
624 | down_write(&zram->init_lock); | |
625 | if (zram->init_done) { | |
626 | up_write(&zram->init_lock); | |
627 | zram_meta_free(meta); | |
628 | pr_info("Cannot change disksize for initialized device\n"); | |
629 | return -EBUSY; | |
630 | } | |
631 | ||
632 | zram->disksize = disksize; | |
633 | set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); | |
634 | zram_init_device(zram, meta); | |
635 | up_write(&zram->init_lock); | |
636 | ||
637 | return len; | |
638 | } | |
639 | ||
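
Because the store handler parses with `memparse()`, the size accepts K/M/G suffixes, and it must be written before the device is first used (afterwards it returns -EBUSY until a reset). A hypothetical userspace sketch; the path assumes a `zram0` device, root privileges, and an uninitialized device:

```c
/* Hypothetical usage sketch: set zram0's disksize to 512 MiB.
 * Equivalent to: echo 512M > /sys/block/zram0/disksize
 * Assumes root and an uninitialized device; illustration only.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/zram0/disksize", "w");

	if (!f) {
		perror("disksize");
		return 1;
	}
	fprintf(f, "512M\n");		/* memparse() handles the suffix */
	return fclose(f) != 0;
}
```
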
640 | static ssize_t reset_store(struct device *dev, | |
641 | struct device_attribute *attr, const char *buf, size_t len) | |
642 | { | |
643 | int ret; | |
644 | unsigned short do_reset; | |
645 | struct zram *zram; | |
646 | struct block_device *bdev; | |
647 | ||
648 | zram = dev_to_zram(dev); | |
649 | bdev = bdget_disk(zram->disk, 0); | |
650 | ||
46a51c80 RK | 651 | if (!bdev) |
652 | return -ENOMEM; | |
653 | ||
9b3bb7ab SS | 654 | /* Do not reset an active device! */ |
655 | if (bdev->bd_holders) | |
656 | return -EBUSY; | |
657 | ||
658 | ret = kstrtou16(buf, 10, &do_reset); | |
659 | if (ret) | |
660 | return ret; | |
661 | ||
662 | if (!do_reset) | |
663 | return -EINVAL; | |
664 | ||
665 | /* Make sure all pending I/O is finished */ | |
46a51c80 | 666 | fsync_bdev(bdev); |
9b3bb7ab | 667 | |
2b86ab9c | 668 | zram_reset_device(zram, true); |
9b3bb7ab | 669 | return len; |
8c921b2b JM | 670 | } |
671 | ||
672 | static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) | |
673 | { | |
924bd88d | 674 | int i, offset; |
8c921b2b JM | 675 | u32 index; |
676 | struct bio_vec *bvec; | |
677 | ||
678 | switch (rw) { | |
679 | case READ: | |
da5cc7d3 | 680 | atomic64_inc(&zram->stats.num_reads); |
8c921b2b JM | 681 | break; |
682 | case WRITE: | |
da5cc7d3 | 683 | atomic64_inc(&zram->stats.num_writes); |
8c921b2b JM | 684 | break; |
685 | } | |
686 | ||
687 | index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; | |
924bd88d | 688 | offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
8c921b2b JM | 689 | |
690 | bio_for_each_segment(bvec, bio, i) { | |
924bd88d JM | 691 | int max_transfer_size = PAGE_SIZE - offset; |
692 | ||
693 | if (bvec->bv_len > max_transfer_size) { | |
694 | /* | |
695 | * zram_bvec_rw() can only operate on a single |
696 | * zram page. Split the bio vector. | |
697 | */ | |
698 | struct bio_vec bv; | |
699 | ||
700 | bv.bv_page = bvec->bv_page; | |
701 | bv.bv_len = max_transfer_size; | |
702 | bv.bv_offset = bvec->bv_offset; | |
703 | ||
704 | if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) | |
705 | goto out; | |
706 | ||
707 | bv.bv_len = bvec->bv_len - max_transfer_size; | |
708 | bv.bv_offset += max_transfer_size; | |
709 | if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) | |
710 | goto out; | |
711 | } else | |
712 | if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) | |
713 | < 0) | |
714 | goto out; | |
715 | ||
716 | update_position(&index, &offset, bvec); | |
a1dd52af | 717 | } |
306b0c95 NG | 718 | |
719 | set_bit(BIO_UPTODATE, &bio->bi_flags); | |
720 | bio_endio(bio, 0); | |
7d7854b4 | 721 | return; |
306b0c95 NG | 722 | |
723 | out: | |
306b0c95 | 724 | bio_io_error(bio); |
306b0c95 NG | 725 | } |
726 | ||
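
`__zram_make_request()` maps the bio's start sector to a (page index, byte offset) pair, and any segment that would cross a page boundary is split into two `zram_bvec_rw()` calls. A trace of that arithmetic with assumed 4 KiB pages (`SECTORS_PER_PAGE` = 8, `SECTOR_SHIFT` = 9):

```c
/* Trace of the index/offset mapping and the bvec split in
 * __zram_make_request(), assuming 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SIZE		4096u
#define SECTOR_SHIFT		9
#define SECTORS_PER_PAGE_SHIFT	3
#define SECTORS_PER_PAGE	(1u << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
	unsigned long long bi_sector = 13;	/* bio starts mid-page */
	unsigned int bv_len = 4096;		/* one page-sized segment */

	unsigned int index = bi_sector >> SECTORS_PER_PAGE_SHIFT;
	unsigned int offset = (bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
	unsigned int max_transfer_size = PAGE_SIZE - offset;

	printf("index=%u offset=%u\n", index, offset);	/* index=1 offset=2560 */
	if (bv_len > max_transfer_size)
		printf("split: %u bytes at (%u,%u) + %u bytes at (%u,0)\n",
		       max_transfer_size, index, offset,
		       bv_len - max_transfer_size, index + 1);
	return 0;
}
```
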
306b0c95 | 727 | /* |
f1e3cfff | 728 | * Handler function for all zram I/O requests. |
306b0c95 | 729 | */ |
5a7bbad2 | 730 | static void zram_make_request(struct request_queue *queue, struct bio *bio) |
306b0c95 | 731 | { |
f1e3cfff | 732 | struct zram *zram = queue->queuedata; |
306b0c95 | 733 | |
0900beae JM | 734 | down_read(&zram->init_lock); |
735 | if (unlikely(!zram->init_done)) | |
3de738cd | 736 | goto error; |
0900beae | 737 | |
f1e3cfff | 738 | if (!valid_io_request(zram, bio)) { |
da5cc7d3 | 739 | atomic64_inc(&zram->stats.invalid_io); |
3de738cd | 740 | goto error; |
6642a67c JM | 741 | } |
742 | ||
8c921b2b | 743 | __zram_make_request(zram, bio, bio_data_dir(bio)); |
0900beae | 744 | up_read(&zram->init_lock); |
306b0c95 | 745 | |
b4fdcb02 | 746 | return; |
0900beae | 747 | |
0900beae | 748 | error: |
3de738cd | 749 | up_read(&zram->init_lock); |
0900beae | 750 | bio_io_error(bio); |
306b0c95 NG | 751 | } |
752 | ||
a0c516cb MK | 753 | static void zram_slot_free(struct work_struct *work) |
754 | { | |
755 | struct zram *zram; | |
756 | ||
757 | zram = container_of(work, struct zram, free_work); | |
758 | down_write(&zram->lock); | |
759 | handle_pending_slot_free(zram); | |
760 | up_write(&zram->lock); | |
761 | } | |
762 | ||
763 | static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq) | |
764 | { | |
765 | spin_lock(&zram->slot_free_lock); | |
766 | free_rq->next = zram->slot_free_rq; | |
767 | zram->slot_free_rq = free_rq; | |
768 | spin_unlock(&zram->slot_free_lock); | |
769 | } | |
770 | ||
2ccbec05 NG | 771 | static void zram_slot_free_notify(struct block_device *bdev, |
772 | unsigned long index) | |
107c161b | 773 | { |
f1e3cfff | 774 | struct zram *zram; |
a0c516cb | 775 | struct zram_slot_free *free_rq; |
107c161b | 776 | |
f1e3cfff | 777 | zram = bdev->bd_disk->private_data; |
da5cc7d3 | 778 | atomic64_inc(&zram->stats.notify_free); |
a0c516cb MK | 779 | |
780 | free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC); | |
781 | if (!free_rq) | |
782 | return; | |
783 | ||
784 | free_rq->index = index; | |
785 | add_slot_free(zram, free_rq); | |
786 | schedule_work(&zram->free_work); | |
107c161b NG | 787 | } |
788 | ||
f1e3cfff | 789 | static const struct block_device_operations zram_devops = { |
f1e3cfff | 790 | .swap_slot_free_notify = zram_slot_free_notify, |
107c161b | 791 | .owner = THIS_MODULE |
306b0c95 NG | 792 | }; |
793 | ||
9b3bb7ab SS |
794 | static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR, |
795 | disksize_show, disksize_store); | |
796 | static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL); | |
797 | static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store); | |
798 | static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL); | |
799 | static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL); | |
800 | static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL); | |
801 | static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL); | |
802 | static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL); | |
803 | static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL); | |
804 | static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL); | |
805 | static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL); | |
806 | ||
807 | static struct attribute *zram_disk_attrs[] = { | |
808 | &dev_attr_disksize.attr, | |
809 | &dev_attr_initstate.attr, | |
810 | &dev_attr_reset.attr, | |
811 | &dev_attr_num_reads.attr, | |
812 | &dev_attr_num_writes.attr, | |
813 | &dev_attr_invalid_io.attr, | |
814 | &dev_attr_notify_free.attr, | |
815 | &dev_attr_zero_pages.attr, | |
816 | &dev_attr_orig_data_size.attr, | |
817 | &dev_attr_compr_data_size.attr, | |
818 | &dev_attr_mem_used_total.attr, | |
819 | NULL, | |
820 | }; | |
821 | ||
822 | static struct attribute_group zram_disk_attr_group = { | |
823 | .attrs = zram_disk_attrs, | |
824 | }; | |
825 | ||
f1e3cfff | 826 | static int create_device(struct zram *zram, int device_id) |
306b0c95 | 827 | { |
39a9b8ac | 828 | int ret = -ENOMEM; |
de1a21a0 | 829 | |
c5bde238 | 830 | init_rwsem(&zram->lock); |
0900beae | 831 | init_rwsem(&zram->init_lock); |
306b0c95 | 832 | |
a0c516cb MK | 833 | INIT_WORK(&zram->free_work, zram_slot_free); |
834 | spin_lock_init(&zram->slot_free_lock); | |
835 | zram->slot_free_rq = NULL; | |
836 | ||
f1e3cfff NG | 837 | zram->queue = blk_alloc_queue(GFP_KERNEL); |
838 | if (!zram->queue) { | |
306b0c95 NG | 839 | pr_err("Error allocating disk queue for device %d\n", |
840 | device_id); | |
de1a21a0 | 841 | goto out; |
306b0c95 NG | 842 | } |
843 | ||
f1e3cfff NG | 844 | blk_queue_make_request(zram->queue, zram_make_request); |
845 | zram->queue->queuedata = zram; | |
306b0c95 NG | 846 | |
847 | /* gendisk structure */ | |
f1e3cfff NG | 848 | zram->disk = alloc_disk(1); |
849 | if (!zram->disk) { | |
94b8435f | 850 | pr_warn("Error allocating disk structure for device %d\n", |
306b0c95 | 851 | device_id); |
39a9b8ac | 852 | goto out_free_queue; |
306b0c95 NG | 853 | } |
854 | ||
f1e3cfff NG | 855 | zram->disk->major = zram_major; |
856 | zram->disk->first_minor = device_id; | |
857 | zram->disk->fops = &zram_devops; | |
858 | zram->disk->queue = zram->queue; | |
859 | zram->disk->private_data = zram; | |
860 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); | |
306b0c95 | 861 | |
33863c21 | 862 | /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */ |
f1e3cfff | 863 | set_capacity(zram->disk, 0); |
5d83d5a0 | 864 | |
a1dd52af NG | 865 | /* |
866 | * To ensure that we always get PAGE_SIZE-aligned |
867 | * and n*PAGE_SIZE-sized I/O requests. |
868 | */ | |
f1e3cfff | 869 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
7b19b8d4 RJ | 870 | blk_queue_logical_block_size(zram->disk->queue, |
871 | ZRAM_LOGICAL_BLOCK_SIZE); | |
f1e3cfff NG | 872 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
873 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); | |
5d83d5a0 | 874 | |
f1e3cfff | 875 | add_disk(zram->disk); |
306b0c95 | 876 | |
33863c21 NG | 877 | ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, |
878 | &zram_disk_attr_group); | |
879 | if (ret < 0) { | |
94b8435f | 880 | pr_warn("Error creating sysfs group\n"); |
39a9b8ac | 881 | goto out_free_disk; |
33863c21 | 882 | } |
33863c21 | 883 | |
f1e3cfff | 884 | zram->init_done = 0; |
39a9b8ac | 885 | return 0; |
de1a21a0 | 886 | |
39a9b8ac JL | 887 | out_free_disk: |
888 | del_gendisk(zram->disk); | |
889 | put_disk(zram->disk); | |
890 | out_free_queue: | |
891 | blk_cleanup_queue(zram->queue); | |
de1a21a0 NG | 892 | out: |
893 | return ret; | |
306b0c95 NG | 894 | } |
895 | ||
f1e3cfff | 896 | static void destroy_device(struct zram *zram) |
306b0c95 | 897 | { |
33863c21 NG | 898 | sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, |
899 | &zram_disk_attr_group); | |
33863c21 | 900 | |
59d3fe54 RK | 901 | del_gendisk(zram->disk); |
902 | put_disk(zram->disk); | |
306b0c95 | 903 | |
59d3fe54 | 904 | blk_cleanup_queue(zram->queue); |
306b0c95 NG | 905 | } |
906 | ||
f1e3cfff | 907 | static int __init zram_init(void) |
306b0c95 | 908 | { |
de1a21a0 | 909 | int ret, dev_id; |
306b0c95 | 910 | |
5fa5a901 | 911 | if (num_devices > max_num_devices) { |
94b8435f | 912 | pr_warn("Invalid value for num_devices: %u\n", |
5fa5a901 | 913 | num_devices); |
de1a21a0 NG | 914 | ret = -EINVAL; |
915 | goto out; | |
306b0c95 NG | 916 | } |
917 | ||
f1e3cfff NG | 918 | zram_major = register_blkdev(0, "zram"); |
919 | if (zram_major <= 0) { | |
94b8435f | 920 | pr_warn("Unable to get major number\n"); |
de1a21a0 NG | 921 | ret = -EBUSY; |
922 | goto out; | |
306b0c95 NG | 923 | } |
924 | ||
306b0c95 | 925 | /* Allocate the device array and initialize each one */ |
5fa5a901 | 926 | zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL); |
43801f6e | 927 | if (!zram_devices) { |
de1a21a0 NG | 928 | ret = -ENOMEM; |
929 | goto unregister; | |
930 | } | |
306b0c95 | 931 | |
5fa5a901 | 932 | for (dev_id = 0; dev_id < num_devices; dev_id++) { |
43801f6e | 933 | ret = create_device(&zram_devices[dev_id], dev_id); |
de1a21a0 | 934 | if (ret) |
3bf040c7 | 935 | goto free_devices; |
de1a21a0 NG | 936 | } |
937 | ||
ca3d70bd DB | 938 | pr_info("Created %u device(s) ...\n", num_devices); |
939 | ||
306b0c95 | 940 | return 0; |
de1a21a0 | 941 | |
3bf040c7 | 942 | free_devices: |
de1a21a0 | 943 | while (dev_id) |
43801f6e NW | 944 | destroy_device(&zram_devices[--dev_id]); |
945 | kfree(zram_devices); | |
de1a21a0 | 946 | unregister: |
f1e3cfff | 947 | unregister_blkdev(zram_major, "zram"); |
de1a21a0 | 948 | out: |
306b0c95 NG | 949 | return ret; |
950 | } | |
951 | ||
f1e3cfff | 952 | static void __exit zram_exit(void) |
306b0c95 NG | 953 | { |
954 | int i; | |
f1e3cfff | 955 | struct zram *zram; |
306b0c95 | 956 | |
5fa5a901 | 957 | for (i = 0; i < num_devices; i++) { |
43801f6e | 958 | zram = &zram_devices[i]; |
306b0c95 | 959 | |
f1e3cfff | 960 | destroy_device(zram); |
2b86ab9c MK | 961 | /* |
962 | * Shouldn't access zram->disk after destroy_device | |
963 | * because destroy_device already released zram->disk. | |
964 | */ | |
965 | zram_reset_device(zram, false); | |
306b0c95 NG | 966 | } |
967 | ||
f1e3cfff | 968 | unregister_blkdev(zram_major, "zram"); |
306b0c95 | 969 | |
43801f6e | 970 | kfree(zram_devices); |
306b0c95 NG | 971 | pr_debug("Cleanup done!\n"); |
972 | } | |
973 | ||
f1e3cfff NG | 974 | module_init(zram_init); |
975 | module_exit(zram_exit); | |
306b0c95 | 976 | |
9b3bb7ab SS | 977 | module_param(num_devices, uint, 0); |
978 | MODULE_PARM_DESC(num_devices, "Number of zram devices"); | |
979 | ||
306b0c95 NG | 980 | MODULE_LICENSE("Dual BSD/GPL"); |
981 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); | |
f1e3cfff | 982 | MODULE_DESCRIPTION("Compressed RAM Block Device"); |