/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

#define ZRAM_ATTR_RO(name)                                              \
static ssize_t name##_show(struct device *d,                            \
                                struct device_attribute *attr, char *b) \
{                                                                       \
        struct zram *zram = dev_to_zram(d);                             \
        return scnprintf(b, PAGE_SIZE, "%llu\n",                       \
                (u64)atomic64_read(&zram->stats.name));                 \
}                                                                       \
static DEVICE_ATTR_RO(name);

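/*
 * As an illustration, ZRAM_ATTR_RO(num_reads) expands into a
 * num_reads_show() callback that prints the 64-bit counter
 * zram->stats.num_reads, plus the matching read-only device
 * attribute dev_attr_num_reads backing the sysfs file.
 */
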
static inline int init_done(struct zram *zram)
{
        return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
        }
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->limit_pages;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

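/*
 * The store below parses the limit with memparse(), so human-readable
 * suffixes such as K, M and G are accepted; for example (device name
 * illustrative) "echo 256M > /sys/block/zram0/mem_limit" caps the
 * zsmalloc pool at 256 MiB. Writing 0 disables the limit again.
 */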
static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}

static ssize_t mem_used_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (init_done(zram))
                val = atomic_long_read(&zram->stats.max_used_pages);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

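/*
 * Writing to mem_used_max only supports resetting the watermark: the
 * store accepts "0" and nothing else, and snaps max_used_pages back
 * to the pool's current size, e.g. (device name illustrative)
 * "echo 0 > /sys/block/zram0/mem_used_max".
 */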
static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);
        int ret;

        ret = kstrtoint(buf, 0, &num);
        if (ret < 0)
                return ret;
        if (num < 1)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (!zcomp_set_max_streams(zram->comp, num)) {
                        pr_info("Cannot change max compression streams\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        zram->max_comp_streams = num;
        ret = len;
out:
        up_write(&zram->init_lock);
        return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }
        strlcpy(zram->compressor, buf, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}

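/*
 * Each meta->table[index].value packs two things into a single word:
 * the low ZRAM_FLAG_SHIFT bits hold the compressed object size, and
 * the bits above them hold the zram_pageflags (including ZRAM_ACCESS,
 * which doubles as a per-entry bit_spinlock). The helpers below read
 * and write the size half without disturbing the flags.
 */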
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                return NULL;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto out_error;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto out_error;
        }

        return meta;

out_error:
        vfree(meta->table);
        kfree(meta);
        return NULL;
}

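/*
 * Advance the (page index, byte offset) cursor by one bio segment.
 * E.g. with offset 3072 and bv_len 2048 on 4K pages, the cursor moves
 * to the next page at offset 1024. Callers guarantee that a single
 * segment never spans more than one page boundary.
 */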
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock while the entry
 * is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        size_t size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}

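/*
 * Read path for one bio segment. Zero-filled or never-written slots are
 * served by handle_zero_page() without touching the pool. For a partial
 * read (bv_len < PAGE_SIZE), the full page is first decompressed into a
 * GFP_NOIO bounce buffer and only the requested byte range is copied
 * out, since compressed objects can only be expanded whole.
 */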
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_zero_page(bvec);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

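/*
 * Lock-free watermark update: re-read max_used_pages and retry the
 * cmpxchg until either the stored maximum is already >= @pages or we
 * successfully raised it, so concurrent updaters can race safely.
 */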
static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        int old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}

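/*
 * Write path for one bio segment. A partial write is turned into a
 * read-modify-write of the whole page. Pages that are entirely zero
 * are recorded with just the ZRAM_ZERO flag and no allocation; pages
 * that compress poorly (clen > max_zpage_size) are stored uncompressed
 * at PAGE_SIZE so they never have to be decompressed on read.
 */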
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm;
        bool locked = false;
        unsigned long alloced_pages;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        locked = true;
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                        bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        update_used_max(zram, alloced_pages);

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        locked = false;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (locked)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, int rw)
{
        int ret;

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        if (unlikely(ret)) {
                if (rw == READ)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}

/*
 * zram_bio_discard - handler for discard requests
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because logical
         * block size isn't identical with physical block size on some arch,
         * we could get a discard request pointing to a specific offset
         * within a certain physical block. Although we could handle such a
         * request by reading that physical block, decompressing it,
         * partially zeroing it, and re-compressing and re-storing it, that
         * isn't reasonable because our intent with a discard request is to
         * save memory. So skipping the partial logical block is appropriate
         * here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;

                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zcomp_destroy(zram->comp);
        zram->max_comp_streams = 1;

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);

        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        if (reset_capacity)
                revalidate_disk(zram->disk);
}

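/*
 * disksize is the knob that actually brings a device up: the value is
 * parsed with memparse() and page-aligned, the compression backend and
 * metadata are allocated, and only then is the disk capacity published.
 * As an illustration (device name assumed):
 *
 *        echo 512M > /sys/block/zram0/disksize
 *
 * Re-sizing an initialized device requires a reset first, as the
 * -EBUSY branch below enforces.
 */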
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor, zram->max_comp_streams);
        if (IS_ERR(comp)) {
                pr_info("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta);
        return err;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram, true);
        return len;

out:
        bdput(bdev);
        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset, rw;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio, 0);
                return;
        }

        rw = bio_data_dir(bio);
        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!init_done(zram)))
                goto error;

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}

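/*
 * Block-layer per-page entry point (->rw_page), used e.g. by the swap
 * code to avoid building a bio for single-page requests; see the
 * error-handling comment near the end of the function.
 */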
static int zram_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, int rw)
{
        int offset, err;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;
        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                return -EINVAL;
        }

        down_read(&zram->init_lock);
        if (unlikely(!init_done(zram))) {
                err = -EIO;
                goto out_unlock;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        /* mask the sector first, then shift: << binds tighter than & */
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, rw);
out_unlock:
        up_read(&zram->init_lock);
        /*
         * If I/O fails, just return the error (i.e., non-zero) without
         * calling page_endio. The upper layers that call rw_page (e.g.,
         * swap_readpage, __swap_writepage) will then resubmit the I/O as
         * a bio request, and bio->bi_end_io does the error handling
         * (e.g., SetPageError, set_page_dirty and extra work).
         */
        if (err == 0)
                page_endio(page, rw, 0);
        return err;
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};

static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical with physical block size(PAGE_SIZE). But if it is
         * different, we will skip discarding some parts of logical blocks in
         * the part of the request range which isn't aligned to physical block
         * size. So we can't ensure that all discarded logical blocks are
         * zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;
        zram->max_comp_streams = 1;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

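/*
 * A typical configuration sequence, shown purely as an illustration
 * (device names assume the first device; see
 * Documentation/blockdev/zram.txt for the full interface):
 *
 *        modprobe zram num_devices=4
 *        echo lzo > /sys/block/zram0/comp_algorithm
 *        echo 1G > /sys/block/zram0/disksize
 *        mkswap /dev/zram0 && swapon /dev/zram0
 *
 * Writing 1 to /sys/block/zram0/reset returns the device to the
 * uninitialized state so it can be reconfigured.
 */
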
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");