Commit | Line | Data |
---|---|---|
306b0c95 | 1 | /* |
f1e3cfff | 2 | * Compressed RAM block device |
306b0c95 | 3 | * |
1130ebba | 4 | * Copyright (C) 2008, 2009, 2010 Nitin Gupta |
7bfb3de8 | 5 | * 2012, 2013 Minchan Kim |
306b0c95 NG |
6 | * |
7 | * This code is released using a dual license strategy: BSD/GPL | |
8 | * You can choose the license that better fits your requirements. | |
9 | * | |
10 | * Released under the terms of 3-clause BSD License | |
11 | * Released under the terms of GNU General Public License Version 2.0 | |
12 | * | |
306b0c95 NG |
13 | */ |
14 | ||
f1e3cfff | 15 | #define KMSG_COMPONENT "zram" |
306b0c95 NG |
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
17 | ||
18 | #include <linux/module.h> | |
19 | #include <linux/kernel.h> | |
8946a086 | 20 | #include <linux/bio.h> |
306b0c95 NG |
21 | #include <linux/bitops.h> |
22 | #include <linux/blkdev.h> | |
23 | #include <linux/buffer_head.h> | |
24 | #include <linux/device.h> | |
306b0c95 | 25 | #include <linux/highmem.h> |
5a0e3ad6 | 26 | #include <linux/slab.h> |
b09ab054 | 27 | #include <linux/backing-dev.h> |
306b0c95 | 28 | #include <linux/string.h> |
306b0c95 | 29 | #include <linux/vmalloc.h> |
fcfa8d95 | 30 | #include <linux/err.h> |
85508ec6 | 31 | #include <linux/idr.h> |
6566d1a3 | 32 | #include <linux/sysfs.h> |
c0265342 | 33 | #include <linux/debugfs.h> |
1dd6c834 | 34 | #include <linux/cpuhotplug.h> |
c6a564ff | 35 | #include <linux/part_stat.h> |
306b0c95 | 36 | |
16a4bfb9 | 37 | #include "zram_drv.h" |
306b0c95 | 38 | |
85508ec6 | 39 | static DEFINE_IDR(zram_index_idr); |
6566d1a3 SS |
40 | /* idr index must be protected */ |
41 | static DEFINE_MUTEX(zram_index_mutex); | |
42 | ||
f1e3cfff | 43 | static int zram_major; |
3d711a38 | 44 | static const char *default_compressor = CONFIG_ZRAM_DEF_COMP; |
306b0c95 | 45 | |
306b0c95 | 46 | /* Module params (documentation at end) */ |
ca3d70bd | 47 | static unsigned int num_devices = 1; |
60f5921a SS |
48 | /* |
49 | * Pages that compress to sizes equal to or greater than this are stored | |
50 | * uncompressed in memory. | |
51 | */ | |
52 | static size_t huge_class_size; | |
33863c21 | 53 | |
a8b456d0 | 54 | static const struct block_device_operations zram_devops; |
a8b456d0 | 55 | |
1f7319c7 | 56 | static void zram_free_page(struct zram *zram, size_t index); |
79c744ee | 57 | static int zram_read_page(struct zram *zram, struct page *page, u32 index, |
4e3c87b9 | 58 | struct bio *parent); |
1f7319c7 | 59 | |
3c9959e0 MK |
60 | static int zram_slot_trylock(struct zram *zram, u32 index) |
61 | { | |
7e529283 | 62 | return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); |
3c9959e0 MK |
63 | } |
64 | ||
c4d6c4cc MK |
65 | static void zram_slot_lock(struct zram *zram, u32 index) |
66 | { | |
7e529283 | 67 | bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); |
c4d6c4cc MK |
68 | } |
69 | ||
70 | static void zram_slot_unlock(struct zram *zram, u32 index) | |
71 | { | |
7e529283 | 72 | bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); |
c4d6c4cc MK |
73 | } |
74 | ||
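
Rather than keeping a separate lock per table entry, zram dedicates one bit (`ZRAM_LOCK`) of each slot's `flags` word as a spinlock via `bit_spin_lock()`. A minimal userspace sketch of the idea using C11 atomics (an illustration of the technique, not the kernel primitive):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define SLOT_LOCK_BIT 0UL	/* stands in for ZRAM_LOCK */

static bool slot_trylock(atomic_ulong *flags)
{
	/* atomically set the lock bit; we own it iff it was clear before */
	unsigned long old = atomic_fetch_or(flags, 1UL << SLOT_LOCK_BIT);

	return !(old & (1UL << SLOT_LOCK_BIT));
}

static void slot_lock(atomic_ulong *flags)
{
	while (!slot_trylock(flags))
		;	/* spin until the holder clears the bit */
}

static void slot_unlock(atomic_ulong *flags)
{
	atomic_fetch_and(flags, ~(1UL << SLOT_LOCK_BIT));
}
```

The payoff is footprint: with millions of slots, embedding the lock in the existing flags word costs no extra memory per entry.
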
08eee69f | 75 | static inline bool init_done(struct zram *zram) |
be2d1d56 | 76 | { |
08eee69f | 77 | return zram->disksize; |
be2d1d56 SS |
78 | } |
79 | ||
9b3bb7ab SS |
80 | static inline struct zram *dev_to_zram(struct device *dev) |
81 | { | |
82 | return (struct zram *)dev_to_disk(dev)->private_data; | |
83 | } | |
84 | ||
643ae61d MK |
85 | static unsigned long zram_get_handle(struct zram *zram, u32 index) |
86 | { | |
87 | return zram->table[index].handle; | |
88 | } | |
89 | ||
90 | static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) | |
91 | { | |
92 | zram->table[index].handle = handle; | |
93 | } | |
94 | ||
b31177f2 | 95 | /* flag operations require the table entry's bit_spin_lock() to be held */ |
c0265342 | 96 | static bool zram_test_flag(struct zram *zram, u32 index, |
522698d7 | 97 | enum zram_pageflags flag) |
99ebbd30 | 98 | { |
7e529283 | 99 | return zram->table[index].flags & BIT(flag); |
522698d7 | 100 | } |
99ebbd30 | 101 | |
beb6602c | 102 | static void zram_set_flag(struct zram *zram, u32 index, |
522698d7 SS |
103 | enum zram_pageflags flag) |
104 | { | |
7e529283 | 105 | zram->table[index].flags |= BIT(flag); |
522698d7 | 106 | } |
99ebbd30 | 107 | |
beb6602c | 108 | static void zram_clear_flag(struct zram *zram, u32 index, |
522698d7 SS |
109 | enum zram_pageflags flag) |
110 | { | |
7e529283 | 111 | zram->table[index].flags &= ~BIT(flag); |
522698d7 | 112 | } |
99ebbd30 | 113 | |
beb6602c | 114 | static inline void zram_set_element(struct zram *zram, u32 index, |
8e19d540 | 115 | unsigned long element) |
116 | { | |
beb6602c | 117 | zram->table[index].element = element; |
8e19d540 | 118 | } |
119 | ||
643ae61d | 120 | static unsigned long zram_get_element(struct zram *zram, u32 index) |
8e19d540 | 121 | { |
643ae61d | 122 | return zram->table[index].element; |
8e19d540 | 123 | } |
124 | ||
beb6602c | 125 | static size_t zram_get_obj_size(struct zram *zram, u32 index) |
522698d7 | 126 | { |
7e529283 | 127 | return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); |
99ebbd30 AM |
128 | } |
129 | ||
beb6602c | 130 | static void zram_set_obj_size(struct zram *zram, |
522698d7 | 131 | u32 index, size_t size) |
9b3bb7ab | 132 | { |
7e529283 | 133 | unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; |
9b3bb7ab | 134 | |
7e529283 | 135 | zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; |
522698d7 SS |
136 | } |
137 | ||
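
The two helpers above show the packing scheme: the low `ZRAM_FLAG_SHIFT` bits of `flags` hold the compressed object size, and flag bits live above them. A standalone sketch (the shift value of 24 is only an assumption for illustration; the real constant lives in zram_drv.h):

```c
#include <assert.h>

#define ZRAM_FLAG_SHIFT 24	/* assumed value, see zram_drv.h */
#define SIZE_MASK ((1UL << ZRAM_FLAG_SHIFT) - 1)

int main(void)
{
	unsigned long flags = 0;

	/* set_obj_size: preserve the flag bits, overwrite the size bits */
	flags = ((flags >> ZRAM_FLAG_SHIFT) << ZRAM_FLAG_SHIFT) | 2048;
	/* set a flag bit that lives above the size field */
	flags |= 1UL << (ZRAM_FLAG_SHIFT + 1);

	assert((flags & SIZE_MASK) == 2048);	/* get_obj_size */
	return 0;
}
```
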
a939888e MK |
138 | static inline bool zram_allocated(struct zram *zram, u32 index) |
139 | { | |
140 | return zram_get_obj_size(zram, index) || | |
141 | zram_test_flag(zram, index, ZRAM_SAME) || | |
142 | zram_test_flag(zram, index, ZRAM_WB); | |
143 | } | |
144 | ||
1f7319c7 | 145 | #if PAGE_SIZE != 4096 |
1c53e0d2 | 146 | static inline bool is_partial_io(struct bio_vec *bvec) |
522698d7 SS |
147 | { |
148 | return bvec->bv_len != PAGE_SIZE; | |
149 | } | |
a70aae12 | 150 | #define ZRAM_PARTIAL_IO 1 |
1f7319c7 MK |
151 | #else |
152 | static inline bool is_partial_io(struct bio_vec *bvec) | |
153 | { | |
154 | return false; | |
155 | } | |
156 | #endif | |
522698d7 | 157 | |
84b33bf7 SS |
158 | static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio) |
159 | { | |
160 | prio &= ZRAM_COMP_PRIORITY_MASK; | |
161 | /* | |
162 | * Clear previous priority value first, in case if we recompress | |
163 | * further an already recompressed page | |
164 | */ | |
165 | zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK << | |
166 | ZRAM_COMP_PRIORITY_BIT1); | |
167 | zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1); | |
168 | } | |
169 | ||
170 | static inline u32 zram_get_priority(struct zram *zram, u32 index) | |
171 | { | |
172 | u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1; | |
173 | ||
174 | return prio & ZRAM_COMP_PRIORITY_MASK; | |
175 | } | |
176 | ||
a7a03505 SS |
177 | static void zram_accessed(struct zram *zram, u32 index) |
178 | { | |
179 | zram_clear_flag(zram, index, ZRAM_IDLE); | |
180 | #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME | |
181 | zram->table[index].ac_time = ktime_get_boottime(); | |
182 | #endif | |
183 | } | |
184 | ||
522698d7 SS |
185 | static inline void update_used_max(struct zram *zram, |
186 | const unsigned long pages) | |
187 | { | |
70ec04f3 | 188 | unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages); |
522698d7 SS |
189 | |
190 | do { | |
70ec04f3 UB |
191 | if (cur_max >= pages) |
192 | return; | |
193 | } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages, | |
194 | &cur_max, pages)); | |
522698d7 SS |
195 | } |
196 | ||
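
`update_used_max()` is the classic lock-free "running maximum": retry a compare-and-exchange until either the recorded peak is already at least as large, or our value lands. A userspace equivalent with C11 atomics:

```c
#include <stdatomic.h>

static void update_max(atomic_long *max, long pages)
{
	long cur = atomic_load(max);

	do {
		if (cur >= pages)
			return;	/* another thread recorded a larger peak */
		/* on failure, cur is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(max, &cur, pages));
}
```
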
48ad1abe | 197 | static inline void zram_fill_page(void *ptr, unsigned long len, |
8e19d540 | 198 | unsigned long value) |
199 | { | |
8e19d540 | 200 | WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long))); |
48ad1abe | 201 | memset_l(ptr, value, len / sizeof(unsigned long)); |
8e19d540 | 202 | } |
203 | ||
204 | static bool page_same_filled(void *ptr, unsigned long *element) | |
522698d7 | 205 | { |
522698d7 | 206 | unsigned long *page; |
f0fe9984 | 207 | unsigned long val; |
90f82cbf | 208 | unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; |
522698d7 SS |
209 | |
210 | page = (unsigned long *)ptr; | |
f0fe9984 | 211 | val = page[0]; |
522698d7 | 212 | |
90f82cbf TS |
213 | if (val != page[last_pos]) |
214 | return false; | |
215 | ||
216 | for (pos = 1; pos < last_pos; pos++) { | |
f0fe9984 | 217 | if (val != page[pos]) |
1c53e0d2 | 218 | return false; |
522698d7 SS |
219 | } |
220 | ||
f0fe9984 | 221 | *element = val; |
8e19d540 | 222 | |
1c53e0d2 | 223 | return true; |
522698d7 SS |
224 | } |
225 | ||
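
`page_same_filled()` detects pages in which every word equals the first one, so the whole page can be stored as a single value. A self-contained userspace version of the same check (a 4096-byte page is assumed here):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096

static bool same_filled(const unsigned long *page, unsigned long *element)
{
	size_t last = PAGE_SIZE / sizeof(*page) - 1;
	unsigned long val = page[0];

	if (val != page[last])	/* cheap early reject for most pages */
		return false;
	for (size_t pos = 1; pos < last; pos++)
		if (val != page[pos])
			return false;
	*element = val;		/* this single word reconstructs the page */
	return true;
}

int main(void)
{
	unsigned long el;
	unsigned long page[PAGE_SIZE / sizeof(unsigned long)] = { 0 };

	assert(same_filled(page, &el) && el == 0);	/* zero page */
	page[100] = 0xdeadbeef;
	assert(!same_filled(page, &el));		/* ordinary page */
	return 0;
}
```
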
9b3bb7ab SS |
226 | static ssize_t initstate_show(struct device *dev, |
227 | struct device_attribute *attr, char *buf) | |
228 | { | |
a68eb3b6 | 229 | u32 val; |
9b3bb7ab SS |
230 | struct zram *zram = dev_to_zram(dev); |
231 | ||
a68eb3b6 SS |
232 | down_read(&zram->init_lock); |
233 | val = init_done(zram); | |
234 | up_read(&zram->init_lock); | |
9b3bb7ab | 235 | |
56b4e8cb | 236 | return scnprintf(buf, PAGE_SIZE, "%u\n", val); |
9b3bb7ab SS |
237 | } |
238 | ||
522698d7 SS |
239 | static ssize_t disksize_show(struct device *dev, |
240 | struct device_attribute *attr, char *buf) | |
241 | { | |
242 | struct zram *zram = dev_to_zram(dev); | |
243 | ||
244 | return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); | |
245 | } | |
246 | ||
9ada9da9 MK |
247 | static ssize_t mem_limit_store(struct device *dev, |
248 | struct device_attribute *attr, const char *buf, size_t len) | |
249 | { | |
250 | u64 limit; | |
251 | char *tmp; | |
252 | struct zram *zram = dev_to_zram(dev); | |
253 | ||
254 | limit = memparse(buf, &tmp); | |
255 | if (buf == tmp) /* no chars parsed, invalid input */ | |
256 | return -EINVAL; | |
257 | ||
258 | down_write(&zram->init_lock); | |
259 | zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; | |
260 | up_write(&zram->init_lock); | |
261 | ||
262 | return len; | |
263 | } | |
264 | ||
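
`mem_limit_store()` accepts human-readable sizes because `memparse()` understands unit suffixes, so `echo 1G > /sys/block/zram0/mem_limit` works as expected. A simplified userspace stand-in covering the K/M/G cases (the kernel helper also handles larger suffixes; this is only a sketch of the semantics):

```c
#include <stdlib.h>

static unsigned long long memparse_sketch(const char *s, char **retptr)
{
	unsigned long long v = strtoull(s, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		v <<= 10;	/* fall through */
	case 'M': case 'm':
		v <<= 10;	/* fall through */
	case 'K': case 'k':
		v <<= 10;
		(*retptr)++;
	}
	/* the driver rejects the input when no characters were consumed */
	return v;
}
```
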
461a8eee MK |
265 | static ssize_t mem_used_max_store(struct device *dev, |
266 | struct device_attribute *attr, const char *buf, size_t len) | |
267 | { | |
268 | int err; | |
269 | unsigned long val; | |
270 | struct zram *zram = dev_to_zram(dev); | |
461a8eee MK |
271 | |
272 | err = kstrtoul(buf, 10, &val); | |
273 | if (err || val != 0) | |
274 | return -EINVAL; | |
275 | ||
276 | down_read(&zram->init_lock); | |
5a99e95b | 277 | if (init_done(zram)) { |
461a8eee | 278 | atomic_long_set(&zram->stats.max_used_pages, |
beb6602c | 279 | zs_get_total_pages(zram->mem_pool)); |
5a99e95b | 280 | } |
461a8eee MK |
281 | up_read(&zram->init_lock); |
282 | ||
283 | return len; | |
284 | } | |
285 | ||
755804d1 BG |
286 | /* |
287 | * Mark all pages which are older than or equal to cutoff as IDLE. | |
288 | * Callers should hold the zram init lock in read mode | |
289 | */ | |
290 | static void mark_idle(struct zram *zram, ktime_t cutoff) | |
e82592c4 | 291 | { |
755804d1 | 292 | int is_idle = 1; |
e82592c4 MK |
293 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
294 | int index; | |
e82592c4 | 295 | |
e82592c4 | 296 | for (index = 0; index < nr_pages; index++) { |
a939888e MK |
297 | /* |
298 | * Do not mark a ZRAM_UNDER_WB slot as ZRAM_IDLE, to close a race. | |
299 | * See the comment in writeback_store. | |
300 | */ | |
e82592c4 | 301 | zram_slot_lock(zram, index); |
1d69a3f8 | 302 | if (zram_allocated(zram, index) && |
755804d1 | 303 | !zram_test_flag(zram, index, ZRAM_UNDER_WB)) { |
a7a03505 SS |
304 | #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME |
305 | is_idle = !cutoff || ktime_after(cutoff, | |
306 | zram->table[index].ac_time); | |
755804d1 BG |
307 | #endif |
308 | if (is_idle) | |
309 | zram_set_flag(zram, index, ZRAM_IDLE); | |
310 | } | |
e82592c4 MK |
311 | zram_slot_unlock(zram, index); |
312 | } | |
755804d1 | 313 | } |
e82592c4 | 314 | |
755804d1 BG |
315 | static ssize_t idle_store(struct device *dev, |
316 | struct device_attribute *attr, const char *buf, size_t len) | |
317 | { | |
318 | struct zram *zram = dev_to_zram(dev); | |
319 | ktime_t cutoff_time = 0; | |
320 | ssize_t rv = -EINVAL; | |
e82592c4 | 321 | |
755804d1 BG |
322 | if (!sysfs_streq(buf, "all")) { |
323 | /* | |
f9bceb2f SS |
324 | * If it did not parse as 'all', try to treat it as an integer |
325 | * when we have memory tracking enabled. | |
755804d1 BG |
326 | */ |
327 | u64 age_sec; | |
328 | ||
a7a03505 | 329 | if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec)) |
755804d1 BG |
330 | cutoff_time = ktime_sub(ktime_get_boottime(), |
331 | ns_to_ktime(age_sec * NSEC_PER_SEC)); | |
332 | else | |
333 | goto out; | |
334 | } | |
335 | ||
336 | down_read(&zram->init_lock); | |
337 | if (!init_done(zram)) | |
338 | goto out_unlock; | |
339 | ||
f9bceb2f SS |
340 | /* |
341 | * A cutoff_time of 0 marks everything as idle; this is the | |
342 | * "all" behavior. | |
343 | */ | |
755804d1 BG |
344 | mark_idle(zram, cutoff_time); |
345 | rv = len; | |
346 | ||
347 | out_unlock: | |
348 | up_read(&zram->init_lock); | |
349 | out: | |
350 | return rv; | |
e82592c4 MK |
351 | } |
352 | ||
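
So writing `all` marks every slot idle, while writing a number of seconds (with CONFIG_ZRAM_TRACK_ENTRY_ACTIME) derives a boot-time-based cutoff. A userspace sketch of the same arithmetic against the Linux-specific CLOCK_BOOTTIME clock:

```c
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

/* everything last accessed before this instant will be marked idle */
static int64_t idle_cutoff_ns(uint64_t age_sec)
{
	struct timespec now;

	clock_gettime(CLOCK_BOOTTIME, &now);
	return (int64_t)now.tv_sec * NSEC_PER_SEC + now.tv_nsec
			- (int64_t)age_sec * NSEC_PER_SEC;
}
```
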
013bf95a | 353 | #ifdef CONFIG_ZRAM_WRITEBACK |
1d69a3f8 MK |
354 | static ssize_t writeback_limit_enable_store(struct device *dev, |
355 | struct device_attribute *attr, const char *buf, size_t len) | |
356 | { | |
357 | struct zram *zram = dev_to_zram(dev); | |
358 | u64 val; | |
359 | ssize_t ret = -EINVAL; | |
360 | ||
361 | if (kstrtoull(buf, 10, &val)) | |
362 | return ret; | |
363 | ||
364 | down_read(&zram->init_lock); | |
365 | spin_lock(&zram->wb_limit_lock); | |
366 | zram->wb_limit_enable = val; | |
367 | spin_unlock(&zram->wb_limit_lock); | |
368 | up_read(&zram->init_lock); | |
369 | ret = len; | |
370 | ||
371 | return ret; | |
372 | } | |
373 | ||
374 | static ssize_t writeback_limit_enable_show(struct device *dev, | |
375 | struct device_attribute *attr, char *buf) | |
376 | { | |
377 | bool val; | |
378 | struct zram *zram = dev_to_zram(dev); | |
379 | ||
380 | down_read(&zram->init_lock); | |
381 | spin_lock(&zram->wb_limit_lock); | |
382 | val = zram->wb_limit_enable; | |
383 | spin_unlock(&zram->wb_limit_lock); | |
384 | up_read(&zram->init_lock); | |
385 | ||
386 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | |
387 | } | |
388 | ||
bb416d18 MK |
389 | static ssize_t writeback_limit_store(struct device *dev, |
390 | struct device_attribute *attr, const char *buf, size_t len) | |
391 | { | |
392 | struct zram *zram = dev_to_zram(dev); | |
393 | u64 val; | |
394 | ssize_t ret = -EINVAL; | |
395 | ||
396 | if (kstrtoull(buf, 10, &val)) | |
397 | return ret; | |
398 | ||
399 | down_read(&zram->init_lock); | |
1d69a3f8 MK |
400 | spin_lock(&zram->wb_limit_lock); |
401 | zram->bd_wb_limit = val; | |
402 | spin_unlock(&zram->wb_limit_lock); | |
bb416d18 MK |
403 | up_read(&zram->init_lock); |
404 | ret = len; | |
405 | ||
406 | return ret; | |
407 | } | |
408 | ||
409 | static ssize_t writeback_limit_show(struct device *dev, | |
410 | struct device_attribute *attr, char *buf) | |
411 | { | |
412 | u64 val; | |
413 | struct zram *zram = dev_to_zram(dev); | |
414 | ||
415 | down_read(&zram->init_lock); | |
1d69a3f8 MK |
416 | spin_lock(&zram->wb_limit_lock); |
417 | val = zram->bd_wb_limit; | |
418 | spin_unlock(&zram->wb_limit_lock); | |
bb416d18 MK |
419 | up_read(&zram->init_lock); |
420 | ||
421 | return scnprintf(buf, PAGE_SIZE, "%llu\n", val); | |
422 | } | |
423 | ||
013bf95a MK |
424 | static void reset_bdev(struct zram *zram) |
425 | { | |
7e529283 | 426 | if (!zram->backing_dev) |
013bf95a MK |
427 | return; |
428 | ||
be914f8f | 429 | fput(zram->bdev_file); |
013bf95a MK |
430 | /* hope that filp_close() flushes all of the IO */ |
431 | filp_close(zram->backing_dev, NULL); | |
432 | zram->backing_dev = NULL; | |
be914f8f | 433 | zram->bdev_file = NULL; |
a8b456d0 | 434 | zram->disk->fops = &zram_devops; |
1363d466 MK |
435 | kvfree(zram->bitmap); |
436 | zram->bitmap = NULL; | |
013bf95a MK |
437 | } |
438 | ||
439 | static ssize_t backing_dev_show(struct device *dev, | |
440 | struct device_attribute *attr, char *buf) | |
441 | { | |
f7daefe4 | 442 | struct file *file; |
013bf95a | 443 | struct zram *zram = dev_to_zram(dev); |
013bf95a MK |
444 | char *p; |
445 | ssize_t ret; | |
446 | ||
447 | down_read(&zram->init_lock); | |
f7daefe4 C |
448 | file = zram->backing_dev; |
449 | if (!file) { | |
013bf95a MK |
450 | memcpy(buf, "none\n", 5); |
451 | up_read(&zram->init_lock); | |
452 | return 5; | |
453 | } | |
454 | ||
455 | p = file_path(file, buf, PAGE_SIZE - 1); | |
456 | if (IS_ERR(p)) { | |
457 | ret = PTR_ERR(p); | |
458 | goto out; | |
459 | } | |
460 | ||
461 | ret = strlen(p); | |
462 | memmove(buf, p, ret); | |
463 | buf[ret++] = '\n'; | |
464 | out: | |
465 | up_read(&zram->init_lock); | |
466 | return ret; | |
467 | } | |
468 | ||
469 | static ssize_t backing_dev_store(struct device *dev, | |
470 | struct device_attribute *attr, const char *buf, size_t len) | |
471 | { | |
472 | char *file_name; | |
c8bd134a | 473 | size_t sz; |
013bf95a MK |
474 | struct file *backing_dev = NULL; |
475 | struct inode *inode; | |
476 | struct address_space *mapping; | |
ee763e21 | 477 | unsigned int bitmap_sz; |
1363d466 | 478 | unsigned long nr_pages, *bitmap = NULL; |
be914f8f | 479 | struct file *bdev_file = NULL; |
013bf95a MK |
480 | int err; |
481 | struct zram *zram = dev_to_zram(dev); | |
482 | ||
483 | file_name = kmalloc(PATH_MAX, GFP_KERNEL); | |
484 | if (!file_name) | |
485 | return -ENOMEM; | |
486 | ||
487 | down_write(&zram->init_lock); | |
488 | if (init_done(zram)) { | |
489 | pr_info("Can't setup backing device for initialized device\n"); | |
490 | err = -EBUSY; | |
491 | goto out; | |
492 | } | |
493 | ||
e55e1b48 | 494 | strscpy(file_name, buf, PATH_MAX); |
c8bd134a PK |
495 | /* ignore trailing newline */ |
496 | sz = strlen(file_name); | |
497 | if (sz > 0 && file_name[sz - 1] == '\n') | |
498 | file_name[sz - 1] = 0x00; | |
013bf95a MK |
499 | |
500 | backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); | |
501 | if (IS_ERR(backing_dev)) { | |
502 | err = PTR_ERR(backing_dev); | |
503 | backing_dev = NULL; | |
504 | goto out; | |
505 | } | |
506 | ||
507 | mapping = backing_dev->f_mapping; | |
508 | inode = mapping->host; | |
509 | ||
510 | /* Only block devices are supported at the moment */ | |
511 | if (!S_ISBLK(inode->i_mode)) { | |
512 | err = -ENOTBLK; | |
513 | goto out; | |
514 | } | |
515 | ||
be914f8f | 516 | bdev_file = bdev_file_open_by_dev(inode->i_rdev, |
eed993a0 | 517 | BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL); |
be914f8f CB |
518 | if (IS_ERR(bdev_file)) { |
519 | err = PTR_ERR(bdev_file); | |
520 | bdev_file = NULL; | |
013bf95a | 521 | goto out; |
5547932d | 522 | } |
013bf95a | 523 | |
1363d466 MK |
524 | nr_pages = i_size_read(inode) >> PAGE_SHIFT; |
525 | bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); | |
526 | bitmap = kvzalloc(bitmap_sz, GFP_KERNEL); | |
527 | if (!bitmap) { | |
528 | err = -ENOMEM; | |
529 | goto out; | |
530 | } | |
531 | ||
013bf95a MK |
532 | reset_bdev(zram); |
533 | ||
be914f8f | 534 | zram->bdev_file = bdev_file; |
013bf95a | 535 | zram->backing_dev = backing_dev; |
1363d466 MK |
536 | zram->bitmap = bitmap; |
537 | zram->nr_pages = nr_pages; | |
013bf95a MK |
538 | up_write(&zram->init_lock); |
539 | ||
540 | pr_info("setup backing device %s\n", file_name); | |
541 | kfree(file_name); | |
542 | ||
543 | return len; | |
544 | out: | |
294ed6b9 | 545 | kvfree(bitmap); |
1363d466 | 546 | |
be914f8f CB |
547 | if (bdev_file) |
548 | fput(bdev_file); | |
013bf95a MK |
549 | |
550 | if (backing_dev) | |
551 | filp_close(backing_dev, NULL); | |
552 | ||
553 | up_write(&zram->init_lock); | |
554 | ||
555 | kfree(file_name); | |
556 | ||
557 | return err; | |
558 | } | |
559 | ||
7e529283 | 560 | static unsigned long alloc_block_bdev(struct zram *zram) |
1363d466 | 561 | { |
3c9959e0 MK |
562 | unsigned long blk_idx = 1; |
563 | retry: | |
1363d466 | 564 | /* skip bit 0 to avoid confusion with zram.handle == 0 */ |
3c9959e0 MK |
565 | blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx); |
566 | if (blk_idx == zram->nr_pages) | |
1363d466 | 567 | return 0; |
1363d466 | 568 | |
3c9959e0 MK |
569 | if (test_and_set_bit(blk_idx, zram->bitmap)) |
570 | goto retry; | |
1363d466 | 571 | |
23eddf39 | 572 | atomic64_inc(&zram->stats.bd_count); |
3c9959e0 | 573 | return blk_idx; |
1363d466 MK |
574 | } |
575 | ||
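
`alloc_block_bdev()` hands out backing-device blocks from a bitmap: scan for a zero bit starting at 1 (bit 0 stays reserved so the value 0 can keep meaning "no block"), claim it with an atomic test-and-set, and rescan on a lost race. A compact userspace rendition of that strategy (the caller is assumed to have allocated enough words to cover `nr_pages` bits):

```c
#include <limits.h>
#include <stdatomic.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long alloc_block(atomic_ulong *bitmap, unsigned long nr_pages)
{
	for (unsigned long idx = 1; idx < nr_pages; idx++) {
		atomic_ulong *word = &bitmap[idx / BITS_PER_LONG];
		unsigned long mask = 1UL << (idx % BITS_PER_LONG);

		if (atomic_load(word) & mask)
			continue;			/* in use */
		if (!(atomic_fetch_or(word, mask) & mask))
			return idx;			/* claimed it first */
		/* lost the race for this bit; keep scanning */
	}
	return 0;	/* 0 doubles as the "no space" sentinel */
}
```
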
7e529283 | 576 | static void free_block_bdev(struct zram *zram, unsigned long blk_idx) |
1363d466 MK |
577 | { |
578 | int was_set; | |
579 | ||
7e529283 | 580 | was_set = test_and_clear_bit(blk_idx, zram->bitmap); |
1363d466 | 581 | WARN_ON_ONCE(!was_set); |
23eddf39 | 582 | atomic64_dec(&zram->stats.bd_count); |
1363d466 MK |
583 | } |
584 | ||
0cd97a03 | 585 | static void read_from_bdev_async(struct zram *zram, struct page *page, |
8e654f8f MK |
586 | unsigned long entry, struct bio *parent) |
587 | { | |
588 | struct bio *bio; | |
589 | ||
be914f8f | 590 | bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO); |
8e654f8f | 591 | bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); |
0cd97a03 | 592 | __bio_add_page(bio, page, PAGE_SIZE, 0); |
4e3c87b9 | 593 | bio_chain(bio, parent); |
8e654f8f | 594 | submit_bio(bio); |
8e654f8f MK |
595 | } |
596 | ||
0d835962 MK |
597 | #define PAGE_WB_SIG "page_index=" |
598 | ||
b46f9ea3 SS |
599 | #define PAGE_WRITEBACK 0 |
600 | #define HUGE_WRITEBACK (1<<0) | |
601 | #define IDLE_WRITEBACK (1<<1) | |
602 | #define INCOMPRESSIBLE_WRITEBACK (1<<2) | |
0d835962 | 603 | |
a939888e MK |
604 | static ssize_t writeback_store(struct device *dev, |
605 | struct device_attribute *attr, const char *buf, size_t len) | |
606 | { | |
607 | struct zram *zram = dev_to_zram(dev); | |
608 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; | |
0d835962 | 609 | unsigned long index = 0; |
a939888e MK |
610 | struct bio bio; |
611 | struct bio_vec bio_vec; | |
612 | struct page *page; | |
3b82a051 | 613 | ssize_t ret = len; |
57e0076e | 614 | int mode, err; |
a939888e MK |
615 | unsigned long blk_idx = 0; |
616 | ||
0bc9f5d1 | 617 | if (sysfs_streq(buf, "idle")) |
a939888e | 618 | mode = IDLE_WRITEBACK; |
0bc9f5d1 | 619 | else if (sysfs_streq(buf, "huge")) |
a939888e | 620 | mode = HUGE_WRITEBACK; |
30226b69 BG |
621 | else if (sysfs_streq(buf, "huge_idle")) |
622 | mode = IDLE_WRITEBACK | HUGE_WRITEBACK; | |
b46f9ea3 SS |
623 | else if (sysfs_streq(buf, "incompressible")) |
624 | mode = INCOMPRESSIBLE_WRITEBACK; | |
0d835962 MK |
625 | else { |
626 | if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) | |
627 | return -EINVAL; | |
628 | ||
2766f182 MK |
629 | if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || |
630 | index >= nr_pages) | |
0d835962 MK |
631 | return -EINVAL; |
632 | ||
633 | nr_pages = 1; | |
634 | mode = PAGE_WRITEBACK; | |
635 | } | |
a939888e MK |
636 | |
637 | down_read(&zram->init_lock); | |
638 | if (!init_done(zram)) { | |
639 | ret = -EINVAL; | |
640 | goto release_init_lock; | |
641 | } | |
642 | ||
643 | if (!zram->backing_dev) { | |
644 | ret = -ENODEV; | |
645 | goto release_init_lock; | |
646 | } | |
647 | ||
648 | page = alloc_page(GFP_KERNEL); | |
649 | if (!page) { | |
650 | ret = -ENOMEM; | |
651 | goto release_init_lock; | |
652 | } | |
653 | ||
2766f182 | 654 | for (; nr_pages != 0; index++, nr_pages--) { |
1d69a3f8 MK |
655 | spin_lock(&zram->wb_limit_lock); |
656 | if (zram->wb_limit_enable && !zram->bd_wb_limit) { | |
657 | spin_unlock(&zram->wb_limit_lock); | |
bb416d18 MK |
658 | ret = -EIO; |
659 | break; | |
660 | } | |
1d69a3f8 | 661 | spin_unlock(&zram->wb_limit_lock); |
bb416d18 | 662 | |
a939888e MK |
663 | if (!blk_idx) { |
664 | blk_idx = alloc_block_bdev(zram); | |
665 | if (!blk_idx) { | |
666 | ret = -ENOSPC; | |
667 | break; | |
668 | } | |
669 | } | |
670 | ||
671 | zram_slot_lock(zram, index); | |
672 | if (!zram_allocated(zram, index)) | |
673 | goto next; | |
674 | ||
675 | if (zram_test_flag(zram, index, ZRAM_WB) || | |
676 | zram_test_flag(zram, index, ZRAM_SAME) || | |
677 | zram_test_flag(zram, index, ZRAM_UNDER_WB)) | |
678 | goto next; | |
679 | ||
30226b69 | 680 | if (mode & IDLE_WRITEBACK && |
b46f9ea3 | 681 | !zram_test_flag(zram, index, ZRAM_IDLE)) |
1d69a3f8 | 682 | goto next; |
30226b69 | 683 | if (mode & HUGE_WRITEBACK && |
b46f9ea3 SS |
684 | !zram_test_flag(zram, index, ZRAM_HUGE)) |
685 | goto next; | |
686 | if (mode & INCOMPRESSIBLE_WRITEBACK && | |
687 | !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) | |
a939888e | 688 | goto next; |
b46f9ea3 | 689 | |
a939888e MK |
690 | /* |
691 | * Clearing ZRAM_UNDER_WB is the caller's duty. | |
692 | * IOW, zram_free_page() never clears it. | |
693 | */ | |
694 | zram_set_flag(zram, index, ZRAM_UNDER_WB); | |
695 | /* Needed to close the hugepage writeback race */ | |
696 | zram_set_flag(zram, index, ZRAM_IDLE); | |
697 | zram_slot_unlock(zram, index); | |
4e3c87b9 | 698 | if (zram_read_page(zram, page, index, NULL)) { |
a939888e MK |
699 | zram_slot_lock(zram, index); |
700 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | |
701 | zram_clear_flag(zram, index, ZRAM_IDLE); | |
702 | zram_slot_unlock(zram, index); | |
703 | continue; | |
704 | } | |
705 | ||
be914f8f | 706 | bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1, |
49add496 | 707 | REQ_OP_WRITE | REQ_SYNC); |
a939888e | 708 | bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); |
34848c91 | 709 | __bio_add_page(&bio, page, PAGE_SIZE, 0); |
a939888e | 710 | |
a939888e MK |
711 | /* |
712 | * XXX: A single page IO would be inefficient for write | |
713 | * but it is not bad as a starting point. | |
714 | */ | |
57e0076e MK |
715 | err = submit_bio_wait(&bio); |
716 | if (err) { | |
a939888e MK |
717 | zram_slot_lock(zram, index); |
718 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | |
719 | zram_clear_flag(zram, index, ZRAM_IDLE); | |
720 | zram_slot_unlock(zram, index); | |
57e0076e | 721 | /* |
9fda785d SS |
722 | * BIO errors are not fatal; we continue and simply | |
723 | * attempt to write back the remaining objects (pages). | |
724 | * At the same time we need to signal user-space that | |
725 | * some writes (at least one, but also could be all of | |
726 | * them) were not successful and we do so by returning | |
727 | * the most recent BIO error. | |
57e0076e MK |
728 | */ |
729 | ret = err; | |
a939888e MK |
730 | continue; |
731 | } | |
732 | ||
23eddf39 | 733 | atomic64_inc(&zram->stats.bd_writes); |
a939888e MK |
734 | /* |
735 | * We released zram_slot_lock, so we need to check whether the slot |
736 | * changed. If the slot was freed, we can catch that |
737 | * easily via zram_allocated. |
738 | * A subtle case is when the slot is freed/reallocated/marked |
739 | * ZRAM_IDLE again. To close that race, idle_store doesn't |
740 | * mark a slot ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB. |
741 | * Thus, we can close the race by checking the ZRAM_IDLE bit. |
742 | */ | |
743 | zram_slot_lock(zram, index); | |
744 | if (!zram_allocated(zram, index) || | |
745 | !zram_test_flag(zram, index, ZRAM_IDLE)) { | |
746 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | |
747 | zram_clear_flag(zram, index, ZRAM_IDLE); | |
748 | goto next; | |
749 | } | |
750 | ||
751 | zram_free_page(zram, index); | |
752 | zram_clear_flag(zram, index, ZRAM_UNDER_WB); | |
753 | zram_set_flag(zram, index, ZRAM_WB); | |
754 | zram_set_element(zram, index, blk_idx); | |
755 | blk_idx = 0; | |
756 | atomic64_inc(&zram->stats.pages_stored); | |
1d69a3f8 MK |
757 | spin_lock(&zram->wb_limit_lock); |
758 | if (zram->wb_limit_enable && zram->bd_wb_limit > 0) | |
759 | zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12); | |
760 | spin_unlock(&zram->wb_limit_lock); | |
a939888e MK |
761 | next: |
762 | zram_slot_unlock(zram, index); | |
763 | } | |
764 | ||
765 | if (blk_idx) | |
766 | free_block_bdev(zram, blk_idx); | |
a939888e MK |
767 | __free_page(page); |
768 | release_init_lock: | |
769 | up_read(&zram->init_lock); | |
770 | ||
771 | return ret; | |
772 | } | |
773 | ||
8e654f8f MK |
774 | struct zram_work { |
775 | struct work_struct work; | |
776 | struct zram *zram; | |
777 | unsigned long entry; | |
fd45af53 | 778 | struct page *page; |
1e9460d1 | 779 | int error; |
8e654f8f MK |
780 | }; |
781 | ||
8e654f8f MK |
782 | static void zram_sync_read(struct work_struct *work) |
783 | { | |
8e654f8f | 784 | struct zram_work *zw = container_of(work, struct zram_work, work); |
4e3c87b9 CH |
785 | struct bio_vec bv; |
786 | struct bio bio; | |
8e654f8f | 787 | |
be914f8f | 788 | bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ); |
4e3c87b9 CH |
789 | bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9); |
790 | __bio_add_page(&bio, zw->page, PAGE_SIZE, 0); | |
1e9460d1 | 791 | zw->error = submit_bio_wait(&bio); |
8e654f8f MK |
792 | } |
793 | ||
794 | /* | |
c62b37d9 CH |
795 | * The block layer wants one ->submit_bio to be active at a time, so if we |
796 | * use chained IO with the parent IO in the same context, it's a deadlock. |
797 | * To avoid that, use a worker thread context. |
8e654f8f | 798 | */ |
fd45af53 | 799 | static int read_from_bdev_sync(struct zram *zram, struct page *page, |
4e3c87b9 | 800 | unsigned long entry) |
8e654f8f MK |
801 | { |
802 | struct zram_work work; | |
803 | ||
fd45af53 | 804 | work.page = page; |
8e654f8f MK |
805 | work.zram = zram; |
806 | work.entry = entry; | |
8e654f8f MK |
807 | |
808 | INIT_WORK_ONSTACK(&work.work, zram_sync_read); | |
809 | queue_work(system_unbound_wq, &work.work); | |
810 | flush_work(&work.work); | |
811 | destroy_work_on_stack(&work.work); | |
812 | ||
1e9460d1 | 813 | return work.error; |
8e654f8f | 814 | } |
8e654f8f | 815 | |
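
The on-stack work item pattern used by `read_from_bdev_sync()` is worth calling out: the work lives on the caller's stack, `flush_work()` provides the synchronous wait, and `destroy_work_on_stack()` keeps the debug-objects machinery happy. A generic kernel-style sketch of the shape (compiles only inside a kernel tree; names here are illustrative):

```c
#include <linux/workqueue.h>

struct sync_op {
	struct work_struct work;
	int error;
};

static void sync_op_fn(struct work_struct *w)
{
	struct sync_op *op = container_of(w, struct sync_op, work);

	op->error = 0;	/* the blocking IO would happen here */
}

static int run_in_worker(void)
{
	struct sync_op op = { .error = -1 };

	INIT_WORK_ONSTACK(&op.work, sync_op_fn);
	queue_work(system_unbound_wq, &op.work);
	flush_work(&op.work);		/* wait for sync_op_fn to finish */
	destroy_work_on_stack(&op.work);
	return op.error;
}
```
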
fd45af53 | 816 | static int read_from_bdev(struct zram *zram, struct page *page, |
4e3c87b9 | 817 | unsigned long entry, struct bio *parent) |
8e654f8f | 818 | { |
23eddf39 | 819 | atomic64_inc(&zram->stats.bd_reads); |
4e3c87b9 | 820 | if (!parent) { |
a70aae12 CH |
821 | if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO))) |
822 | return -EIO; | |
4e3c87b9 | 823 | return read_from_bdev_sync(zram, page, entry); |
a70aae12 | 824 | } |
0cd97a03 | 825 | read_from_bdev_async(zram, page, entry, parent); |
1e9460d1 | 826 | return 0; |
8e654f8f | 827 | } |
013bf95a | 828 | #else |
013bf95a | 829 | static inline void reset_bdev(struct zram *zram) {}; |
fd45af53 | 830 | static int read_from_bdev(struct zram *zram, struct page *page, |
4e3c87b9 | 831 | unsigned long entry, struct bio *parent) |
8e654f8f MK |
832 | { |
833 | return -EIO; | |
834 | } | |
7e529283 MK |
835 | |
836 | static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}; | |
013bf95a MK |
837 | #endif |
838 | ||
c0265342 MK |
839 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
840 | ||
841 | static struct dentry *zram_debugfs_root; | |
842 | ||
843 | static void zram_debugfs_create(void) | |
844 | { | |
845 | zram_debugfs_root = debugfs_create_dir("zram", NULL); | |
846 | } | |
847 | ||
848 | static void zram_debugfs_destroy(void) | |
849 | { | |
850 | debugfs_remove_recursive(zram_debugfs_root); | |
851 | } | |
852 | ||
c0265342 MK |
853 | static ssize_t read_block_state(struct file *file, char __user *buf, |
854 | size_t count, loff_t *ppos) | |
855 | { | |
856 | char *kbuf; | |
857 | ssize_t index, written = 0; | |
858 | struct zram *zram = file->private_data; | |
859 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; | |
860 | struct timespec64 ts; | |
861 | ||
862 | kbuf = kvmalloc(count, GFP_KERNEL); | |
863 | if (!kbuf) | |
864 | return -ENOMEM; | |
865 | ||
866 | down_read(&zram->init_lock); | |
867 | if (!init_done(zram)) { | |
868 | up_read(&zram->init_lock); | |
869 | kvfree(kbuf); | |
870 | return -EINVAL; | |
871 | } | |
872 | ||
873 | for (index = *ppos; index < nr_pages; index++) { | |
874 | int copied; | |
875 | ||
876 | zram_slot_lock(zram, index); | |
877 | if (!zram_allocated(zram, index)) | |
878 | goto next; | |
879 | ||
880 | ts = ktime_to_timespec64(zram->table[index].ac_time); | |
881 | copied = snprintf(kbuf + written, count, | |
77db7bb5 | 882 | "%12zd %12lld.%06lu %c%c%c%c%c%c\n", |
c0265342 MK |
883 | index, (s64)ts.tv_sec, |
884 | ts.tv_nsec / NSEC_PER_USEC, | |
885 | zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', | |
886 | zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', | |
e82592c4 | 887 | zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.', |
60e9b39e | 888 | zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.', |
77db7bb5 SS |
889 | zram_get_priority(zram, index) ? 'r' : '.', |
890 | zram_test_flag(zram, index, | |
891 | ZRAM_INCOMPRESSIBLE) ? 'n' : '.'); | |
c0265342 | 892 | |
a88e03cf | 893 | if (count <= copied) { |
c0265342 MK |
894 | zram_slot_unlock(zram, index); |
895 | break; | |
896 | } | |
897 | written += copied; | |
898 | count -= copied; | |
899 | next: | |
900 | zram_slot_unlock(zram, index); | |
901 | *ppos += 1; | |
902 | } | |
903 | ||
904 | up_read(&zram->init_lock); | |
905 | if (copy_to_user(buf, kbuf, written)) | |
906 | written = -EFAULT; | |
907 | kvfree(kbuf); | |
908 | ||
909 | return written; | |
910 | } | |
911 | ||
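
For reference, each emitted line is the slot index, the slot's last access time, then the six one-character flags in the order shown above (s/w/h/i/r/n, with `.` for "not set"). A made-up sample line for a huge page that was later marked idle might look like:

```
         300         1611.647064 ..hi..
```
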
912 | static const struct file_operations proc_zram_block_state_op = { | |
913 | .open = simple_open, | |
914 | .read = read_block_state, | |
915 | .llseek = default_llseek, | |
916 | }; | |
917 | ||
918 | static void zram_debugfs_register(struct zram *zram) | |
919 | { | |
920 | if (!zram_debugfs_root) | |
921 | return; | |
922 | ||
923 | zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name, | |
924 | zram_debugfs_root); | |
925 | debugfs_create_file("block_state", 0400, zram->debugfs_dir, | |
926 | zram, &proc_zram_block_state_op); | |
927 | } | |
928 | ||
929 | static void zram_debugfs_unregister(struct zram *zram) | |
930 | { | |
931 | debugfs_remove_recursive(zram->debugfs_dir); | |
932 | } | |
933 | #else | |
934 | static void zram_debugfs_create(void) {}; | |
935 | static void zram_debugfs_destroy(void) {}; | |
c0265342 MK |
936 | static void zram_debugfs_register(struct zram *zram) {}; |
937 | static void zram_debugfs_unregister(struct zram *zram) {}; | |
938 | #endif | |
013bf95a | 939 | |
43209ea2 SS |
940 | /* |
941 | * We switched to per-cpu streams and this attr is not needed anymore. | |
942 | * However, we will keep it around for some time, because: | |
943 | * a) we may revert per-cpu streams in the future | |
944 | * b) it's visible to user space and we need to follow our 2-year |
945 | * retirement rule; but we already have a number of 'soon to be |
946 | * altered' attrs, so max_comp_streams needs to wait for the next |
947 | * layoff cycle. | |
948 | */ | |
522698d7 SS |
949 | static ssize_t max_comp_streams_show(struct device *dev, |
950 | struct device_attribute *attr, char *buf) | |
951 | { | |
43209ea2 | 952 | return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus()); |
522698d7 SS |
953 | } |
954 | ||
beca3ec7 SS |
955 | static ssize_t max_comp_streams_store(struct device *dev, |
956 | struct device_attribute *attr, const char *buf, size_t len) | |
957 | { | |
43209ea2 | 958 | return len; |
beca3ec7 SS |
959 | } |
960 | ||
001d9273 | 961 | static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) |
e46b8a03 | 962 | { |
001d9273 SS |
963 | /* Do not free statically defined compression algorithms */ |
964 | if (zram->comp_algs[prio] != default_compressor) | |
965 | kfree(zram->comp_algs[prio]); | |
966 | ||
967 | zram->comp_algs[prio] = alg; | |
968 | } | |
969 | ||
970 | static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf) | |
971 | { | |
972 | ssize_t sz; | |
e46b8a03 SS |
973 | |
974 | down_read(&zram->init_lock); | |
001d9273 | 975 | sz = zcomp_available_show(zram->comp_algs[prio], buf); |
e46b8a03 SS |
976 | up_read(&zram->init_lock); |
977 | ||
978 | return sz; | |
979 | } | |
980 | ||
001d9273 | 981 | static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) |
e46b8a03 | 982 | { |
7ac07a26 | 983 | char *compressor; |
4bbacd51 SS |
984 | size_t sz; |
985 | ||
7ac07a26 SS |
986 | sz = strlen(buf); |
987 | if (sz >= CRYPTO_MAX_ALG_NAME) | |
988 | return -E2BIG; | |
989 | ||
990 | compressor = kstrdup(buf, GFP_KERNEL); | |
991 | if (!compressor) | |
992 | return -ENOMEM; | |
993 | ||
415403be | 994 | /* ignore trailing newline */ |
415403be SS |
995 | if (sz > 0 && compressor[sz - 1] == '\n') |
996 | compressor[sz - 1] = 0x00; | |
997 | ||
7ac07a26 SS |
998 | if (!zcomp_available_algorithm(compressor)) { |
999 | kfree(compressor); | |
1d5b43bf | 1000 | return -EINVAL; |
7ac07a26 | 1001 | } |
1d5b43bf | 1002 | |
e46b8a03 SS |
1003 | down_write(&zram->init_lock); |
1004 | if (init_done(zram)) { | |
1005 | up_write(&zram->init_lock); | |
7ac07a26 | 1006 | kfree(compressor); |
e46b8a03 SS |
1007 | pr_info("Can't change algorithm for initialized device\n"); |
1008 | return -EBUSY; | |
1009 | } | |
4bbacd51 | 1010 | |
001d9273 | 1011 | comp_algorithm_set(zram, prio, compressor); |
e46b8a03 | 1012 | up_write(&zram->init_lock); |
001d9273 SS |
1013 | return 0; |
1014 | } | |
1015 | ||
1016 | static ssize_t comp_algorithm_show(struct device *dev, | |
1017 | struct device_attribute *attr, | |
1018 | char *buf) | |
1019 | { | |
1020 | struct zram *zram = dev_to_zram(dev); | |
1021 | ||
1022 | return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf); | |
1023 | } | |
1024 | ||
1025 | static ssize_t comp_algorithm_store(struct device *dev, | |
1026 | struct device_attribute *attr, | |
1027 | const char *buf, | |
1028 | size_t len) | |
1029 | { | |
1030 | struct zram *zram = dev_to_zram(dev); | |
1031 | int ret; | |
1032 | ||
1033 | ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf); | |
1034 | return ret ? ret : len; | |
e46b8a03 SS |
1035 | } |
1036 | ||
001d9273 SS |
1037 | #ifdef CONFIG_ZRAM_MULTI_COMP |
1038 | static ssize_t recomp_algorithm_show(struct device *dev, | |
1039 | struct device_attribute *attr, | |
1040 | char *buf) | |
1041 | { | |
1042 | struct zram *zram = dev_to_zram(dev); | |
1043 | ssize_t sz = 0; | |
1044 | u32 prio; | |
1045 | ||
1046 | for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { | |
1047 | if (!zram->comp_algs[prio]) | |
1048 | continue; | |
1049 | ||
1050 | sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio); | |
1051 | sz += __comp_algorithm_show(zram, prio, buf + sz); | |
1052 | } | |
1053 | ||
1054 | return sz; | |
1055 | } | |
1056 | ||
1057 | static ssize_t recomp_algorithm_store(struct device *dev, | |
1058 | struct device_attribute *attr, | |
1059 | const char *buf, | |
1060 | size_t len) | |
1061 | { | |
1062 | struct zram *zram = dev_to_zram(dev); | |
1063 | int prio = ZRAM_SECONDARY_COMP; | |
1064 | char *args, *param, *val; | |
1065 | char *alg = NULL; | |
1066 | int ret; | |
1067 | ||
1068 | args = skip_spaces(buf); | |
1069 | while (*args) { | |
1070 | args = next_arg(args, ¶m, &val); | |
1071 | ||
df32de14 | 1072 | if (!val || !*val) |
001d9273 SS |
1073 | return -EINVAL; |
1074 | ||
1075 | if (!strcmp(param, "algo")) { | |
1076 | alg = val; | |
1077 | continue; | |
1078 | } | |
1079 | ||
1080 | if (!strcmp(param, "priority")) { | |
1081 | ret = kstrtoint(val, 10, &prio); | |
1082 | if (ret) | |
1083 | return ret; | |
1084 | continue; | |
1085 | } | |
1086 | } | |
1087 | ||
1088 | if (!alg) | |
1089 | return -EINVAL; | |
1090 | ||
1091 | if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS) | |
1092 | return -EINVAL; | |
1093 | ||
1094 | ret = __comp_algorithm_store(zram, prio, alg); | |
1095 | return ret ? ret : len; | |
1096 | } | |
1097 | #endif | |
1098 | ||
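
`recomp_algorithm_store()` consumes `param=value` tokens (e.g. `echo "algo=zstd priority=1" > recomp_algorithm`) via the kernel's `next_arg()`. A rough userspace tokenizer with the same basic contract, purely illustrative (the kernel helper additionally handles quoting):

```c
#include <ctype.h>

/* split "param=val rest..." in place; returns a pointer past this token */
static char *next_arg_sketch(char *args, char **param, char **val)
{
	*param = args;
	*val = NULL;

	while (*args && !isspace((unsigned char)*args)) {
		if (*args == '=' && !*val) {
			*args = '\0';		/* terminate the param name */
			*val = args + 1;
		}
		args++;
	}
	if (*args)
		*args++ = '\0';			/* terminate the value */
	while (isspace((unsigned char)*args))
		args++;				/* skip to the next token */
	return args;
}
```
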
522698d7 SS |
1099 | static ssize_t compact_store(struct device *dev, |
1100 | struct device_attribute *attr, const char *buf, size_t len) | |
306b0c95 | 1101 | { |
522698d7 | 1102 | struct zram *zram = dev_to_zram(dev); |
306b0c95 | 1103 | |
522698d7 SS |
1104 | down_read(&zram->init_lock); |
1105 | if (!init_done(zram)) { | |
1106 | up_read(&zram->init_lock); | |
1107 | return -EINVAL; | |
1108 | } | |
306b0c95 | 1109 | |
beb6602c | 1110 | zs_compact(zram->mem_pool); |
522698d7 | 1111 | up_read(&zram->init_lock); |
d2d5e762 | 1112 | |
522698d7 | 1113 | return len; |
d2d5e762 WY |
1114 | } |
1115 | ||
522698d7 SS |
1116 | static ssize_t io_stat_show(struct device *dev, |
1117 | struct device_attribute *attr, char *buf) | |
d2d5e762 | 1118 | { |
522698d7 SS |
1119 | struct zram *zram = dev_to_zram(dev); |
1120 | ssize_t ret; | |
d2d5e762 | 1121 | |
522698d7 SS |
1122 | down_read(&zram->init_lock); |
1123 | ret = scnprintf(buf, PAGE_SIZE, | |
9fe95bab | 1124 | "%8llu %8llu 0 %8llu\n", |
522698d7 SS |
1125 | (u64)atomic64_read(&zram->stats.failed_reads), |
1126 | (u64)atomic64_read(&zram->stats.failed_writes), | |
522698d7 SS |
1127 | (u64)atomic64_read(&zram->stats.notify_free)); |
1128 | up_read(&zram->init_lock); | |
306b0c95 | 1129 | |
522698d7 | 1130 | return ret; |
9b3bb7ab SS |
1131 | } |
1132 | ||
522698d7 SS |
1133 | static ssize_t mm_stat_show(struct device *dev, |
1134 | struct device_attribute *attr, char *buf) | |
9b3bb7ab | 1135 | { |
522698d7 | 1136 | struct zram *zram = dev_to_zram(dev); |
7d3f3938 | 1137 | struct zs_pool_stats pool_stats; |
522698d7 SS |
1138 | u64 orig_size, mem_used = 0; |
1139 | long max_used; | |
1140 | ssize_t ret; | |
a539c72a | 1141 | |
7d3f3938 SS |
1142 | memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats)); |
1143 | ||
522698d7 | 1144 | down_read(&zram->init_lock); |
7d3f3938 | 1145 | if (init_done(zram)) { |
beb6602c MK |
1146 | mem_used = zs_get_total_pages(zram->mem_pool); |
1147 | zs_pool_stats(zram->mem_pool, &pool_stats); | |
7d3f3938 | 1148 | } |
9b3bb7ab | 1149 | |
522698d7 SS |
1150 | orig_size = atomic64_read(&zram->stats.pages_stored); |
1151 | max_used = atomic_long_read(&zram->stats.max_used_pages); | |
9b3bb7ab | 1152 | |
522698d7 | 1153 | ret = scnprintf(buf, PAGE_SIZE, |
194e28da | 1154 | "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n", |
522698d7 SS |
1155 | orig_size << PAGE_SHIFT, |
1156 | (u64)atomic64_read(&zram->stats.compr_data_size), | |
1157 | mem_used << PAGE_SHIFT, | |
1158 | zram->limit_pages << PAGE_SHIFT, | |
1159 | max_used << PAGE_SHIFT, | |
8e19d540 | 1160 | (u64)atomic64_read(&zram->stats.same_pages), |
23959281 | 1161 | atomic_long_read(&pool_stats.pages_compacted), |
194e28da MK |
1162 | (u64)atomic64_read(&zram->stats.huge_pages), |
1163 | (u64)atomic64_read(&zram->stats.huge_pages_since)); | |
522698d7 | 1164 | up_read(&zram->init_lock); |
9b3bb7ab | 1165 | |
522698d7 SS |
1166 | return ret; |
1167 | } | |
1168 | ||
23eddf39 | 1169 | #ifdef CONFIG_ZRAM_WRITEBACK |
bb416d18 | 1170 | #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12))) |
23eddf39 MK |
1171 | static ssize_t bd_stat_show(struct device *dev, |
1172 | struct device_attribute *attr, char *buf) | |
1173 | { | |
1174 | struct zram *zram = dev_to_zram(dev); | |
1175 | ssize_t ret; | |
1176 | ||
1177 | down_read(&zram->init_lock); | |
1178 | ret = scnprintf(buf, PAGE_SIZE, | |
1179 | "%8llu %8llu %8llu\n", | |
bb416d18 MK |
1180 | FOUR_K((u64)atomic64_read(&zram->stats.bd_count)), |
1181 | FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)), | |
1182 | FOUR_K((u64)atomic64_read(&zram->stats.bd_writes))); | |
23eddf39 MK |
1183 | up_read(&zram->init_lock); |
1184 | ||
1185 | return ret; | |
1186 | } | |
1187 | #endif | |
1188 | ||
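
A note on `FOUR_K()`: the bd_* counters are kept in units of PAGE_SIZE, and the macro rescales them to 4K units so that bd_stat output reads the same on 4K-page and large-page kernels. For instance:

```c
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))

/* PAGE_SHIFT == 12 (4K pages):  FOUR_K(10) == 10  */
/* PAGE_SHIFT == 16 (64K pages): FOUR_K(10) == 160 */
```
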
623e47fc SS |
1189 | static ssize_t debug_stat_show(struct device *dev, |
1190 | struct device_attribute *attr, char *buf) | |
1191 | { | |
37887783 | 1192 | int version = 1; |
623e47fc SS |
1193 | struct zram *zram = dev_to_zram(dev); |
1194 | ssize_t ret; | |
1195 | ||
1196 | down_read(&zram->init_lock); | |
1197 | ret = scnprintf(buf, PAGE_SIZE, | |
37887783 | 1198 | "version: %d\n%8llu %8llu\n", |
623e47fc | 1199 | version, |
37887783 | 1200 | (u64)atomic64_read(&zram->stats.writestall), |
3c9959e0 | 1201 | (u64)atomic64_read(&zram->stats.miss_free)); |
623e47fc SS |
1202 | up_read(&zram->init_lock); |
1203 | ||
1204 | return ret; | |
1205 | } | |
1206 | ||
522698d7 SS |
1207 | static DEVICE_ATTR_RO(io_stat); |
1208 | static DEVICE_ATTR_RO(mm_stat); | |
23eddf39 MK |
1209 | #ifdef CONFIG_ZRAM_WRITEBACK |
1210 | static DEVICE_ATTR_RO(bd_stat); | |
1211 | #endif | |
623e47fc | 1212 | static DEVICE_ATTR_RO(debug_stat); |
522698d7 | 1213 | |
beb6602c | 1214 | static void zram_meta_free(struct zram *zram, u64 disksize) |
522698d7 SS |
1215 | { |
1216 | size_t num_pages = disksize >> PAGE_SHIFT; | |
1217 | size_t index; | |
1fec1172 GM |
1218 | |
1219 | /* Free all pages that are still in this zram device */ | |
302128dc MK |
1220 | for (index = 0; index < num_pages; index++) |
1221 | zram_free_page(zram, index); | |
1fec1172 | 1222 | |
beb6602c MK |
1223 | zs_destroy_pool(zram->mem_pool); |
1224 | vfree(zram->table); | |
9b3bb7ab SS |
1225 | } |
1226 | ||
beb6602c | 1227 | static bool zram_meta_alloc(struct zram *zram, u64 disksize) |
9b3bb7ab SS |
1228 | { |
1229 | size_t num_pages; | |
9b3bb7ab | 1230 | |
9b3bb7ab | 1231 | num_pages = disksize >> PAGE_SHIFT; |
fad953ce | 1232 | zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); |
beb6602c MK |
1233 | if (!zram->table) |
1234 | return false; | |
9b3bb7ab | 1235 | |
beb6602c MK |
1236 | zram->mem_pool = zs_create_pool(zram->disk->disk_name); |
1237 | if (!zram->mem_pool) { | |
1238 | vfree(zram->table); | |
1239 | return false; | |
9b3bb7ab SS |
1240 | } |
1241 | ||
60f5921a SS |
1242 | if (!huge_class_size) |
1243 | huge_class_size = zs_huge_class_size(zram->mem_pool); | |
beb6602c | 1244 | return true; |
9b3bb7ab SS |
1245 | } |
1246 | ||
d2d5e762 WY |
1247 | /* |
1248 | * To protect concurrent access to the same index entry, | |
1249 | * the caller should hold this table index entry's bit_spinlock to |
1250 | * indicate that this index entry is being accessed. |
1251 | */ | |
f1e3cfff | 1252 | static void zram_free_page(struct zram *zram, size_t index) |
306b0c95 | 1253 | { |
db8ffbd4 MK |
1254 | unsigned long handle; |
1255 | ||
a7a03505 | 1256 | #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME |
7e529283 MK |
1257 | zram->table[index].ac_time = 0; |
1258 | #endif | |
e82592c4 MK |
1259 | if (zram_test_flag(zram, index, ZRAM_IDLE)) |
1260 | zram_clear_flag(zram, index, ZRAM_IDLE); | |
1261 | ||
89e85bce MK |
1262 | if (zram_test_flag(zram, index, ZRAM_HUGE)) { |
1263 | zram_clear_flag(zram, index, ZRAM_HUGE); | |
1264 | atomic64_dec(&zram->stats.huge_pages); | |
1265 | } | |
1266 | ||
84b33bf7 SS |
1267 | if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) |
1268 | zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE); | |
1269 | ||
1270 | zram_set_priority(zram, index, 0); | |
1271 | ||
7e529283 MK |
1272 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
1273 | zram_clear_flag(zram, index, ZRAM_WB); | |
1274 | free_block_bdev(zram, zram_get_element(zram, index)); | |
1275 | goto out; | |
db8ffbd4 | 1276 | } |
306b0c95 | 1277 | |
8e19d540 | 1278 | /* |
1279 | * No memory is allocated for same element filled pages. | |
1280 | * Simply clear the ZRAM_SAME flag. | |
1281 | */ | |
beb6602c MK |
1282 | if (zram_test_flag(zram, index, ZRAM_SAME)) { |
1283 | zram_clear_flag(zram, index, ZRAM_SAME); | |
8e19d540 | 1284 | atomic64_dec(&zram->stats.same_pages); |
7e529283 | 1285 | goto out; |
306b0c95 NG |
1286 | } |
1287 | ||
db8ffbd4 | 1288 | handle = zram_get_handle(zram, index); |
8e19d540 | 1289 | if (!handle) |
1290 | return; | |
1291 | ||
beb6602c | 1292 | zs_free(zram->mem_pool, handle); |
306b0c95 | 1293 | |
beb6602c | 1294 | atomic64_sub(zram_get_obj_size(zram, index), |
d2d5e762 | 1295 | &zram->stats.compr_data_size); |
7e529283 | 1296 | out: |
90a7806e | 1297 | atomic64_dec(&zram->stats.pages_stored); |
643ae61d | 1298 | zram_set_handle(zram, index, 0); |
beb6602c | 1299 | zram_set_obj_size(zram, index, 0); |
a939888e MK |
1300 | WARN_ON_ONCE(zram->table[index].flags & |
1301 | ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB)); | |
306b0c95 NG |
1302 | } |
1303 | ||
5561347a SS |
1304 | /* |
1305 | * Reads (decompresses if needed) a page from zspool (zsmalloc). | |
1306 | * The corresponding ZRAM slot should be locked. | |
1307 | */ | |
1308 | static int zram_read_from_zspool(struct zram *zram, struct page *page, | |
1309 | u32 index) | |
306b0c95 | 1310 | { |
0669d2b2 | 1311 | struct zcomp_strm *zstrm; |
92967471 | 1312 | unsigned long handle; |
ebaf9ab5 | 1313 | unsigned int size; |
1f7319c7 | 1314 | void *src, *dst; |
84b33bf7 | 1315 | u32 prio; |
0669d2b2 | 1316 | int ret; |
1f7319c7 | 1317 | |
643ae61d | 1318 | handle = zram_get_handle(zram, index); |
ae94264e MK |
1319 | if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { |
1320 | unsigned long value; | |
1321 | void *mem; | |
1322 | ||
1323 | value = handle ? zram_get_element(zram, index) : 0; | |
73829b71 | 1324 | mem = kmap_local_page(page); |
ae94264e | 1325 | zram_fill_page(mem, PAGE_SIZE, value); |
73829b71 | 1326 | kunmap_local(mem); |
ae94264e MK |
1327 | return 0; |
1328 | } | |
1329 | ||
beb6602c | 1330 | size = zram_get_obj_size(zram, index); |
306b0c95 | 1331 | |
84b33bf7 SS |
1332 | if (size != PAGE_SIZE) { |
1333 | prio = zram_get_priority(zram, index); | |
1334 | zstrm = zcomp_stream_get(zram->comps[prio]); | |
1335 | } | |
0669d2b2 | 1336 | |
beb6602c | 1337 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); |
ebaf9ab5 | 1338 | if (size == PAGE_SIZE) { |
73829b71 | 1339 | dst = kmap_local_page(page); |
1f7319c7 | 1340 | memcpy(dst, src, PAGE_SIZE); |
73829b71 | 1341 | kunmap_local(dst); |
1f7319c7 | 1342 | ret = 0; |
ebaf9ab5 | 1343 | } else { |
73829b71 | 1344 | dst = kmap_local_page(page); |
1f7319c7 | 1345 | ret = zcomp_decompress(zstrm, src, size, dst); |
73829b71 | 1346 | kunmap_local(dst); |
84b33bf7 | 1347 | zcomp_stream_put(zram->comps[prio]); |
ebaf9ab5 | 1348 | } |
beb6602c | 1349 | zs_unmap_object(zram->mem_pool, handle); |
5561347a SS |
1350 | return ret; |
1351 | } | |
1352 | ||
ffb0a9e6 | 1353 | static int zram_read_page(struct zram *zram, struct page *page, u32 index, |
4e3c87b9 | 1354 | struct bio *parent) |
5561347a SS |
1355 | { |
1356 | int ret; | |
1357 | ||
1358 | zram_slot_lock(zram, index); | |
1359 | if (!zram_test_flag(zram, index, ZRAM_WB)) { | |
1360 | /* Slot should be locked throughout the function call */ | |
1361 | ret = zram_read_from_zspool(zram, page, index); | |
1362 | zram_slot_unlock(zram, index); | |
1363 | } else { | |
fd45af53 CH |
1364 | /* |
1365 | * The slot should be unlocked before reading from the backing | |
1366 | * device. | |
1367 | */ | |
5561347a SS |
1368 | zram_slot_unlock(zram, index); |
1369 | ||
fd45af53 | 1370 | ret = read_from_bdev(zram, page, zram_get_element(zram, index), |
4e3c87b9 | 1371 | parent); |
5561347a | 1372 | } |
a1dd52af | 1373 | |
8c921b2b | 1374 | /* Should NEVER happen. Return bio error if it does. */ |
5561347a | 1375 | if (WARN_ON(ret < 0)) |
8c921b2b | 1376 | pr_err("Decompression failed! err=%d, page=%u\n", ret, index); |
306b0c95 | 1377 | |
1f7319c7 | 1378 | return ret; |
306b0c95 NG |
1379 | } |
1380 | ||
889ae916 CH |
1381 | /* |
1382 | * Use a temporary buffer to decompress the page, as the decompressor | |
1383 | * always expects a full page for the output. | |
1384 | */ | |
1385 | static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec, | |
4e3c87b9 | 1386 | u32 index, int offset) |
924bd88d | 1387 | { |
889ae916 | 1388 | struct page *page = alloc_page(GFP_NOIO); |
924bd88d | 1389 | int ret; |
37b51fdd | 1390 | |
889ae916 CH |
1391 | if (!page) |
1392 | return -ENOMEM; | |
4e3c87b9 | 1393 | ret = zram_read_page(zram, page, index, NULL); |
889ae916 | 1394 | if (likely(!ret)) |
f575a5ad | 1395 | memcpy_to_bvec(bvec, page_address(page) + offset); |
889ae916 | 1396 | __free_page(page); |
37b51fdd | 1397 | return ret; |
924bd88d | 1398 | } |
37b51fdd | 1399 | |
889ae916 CH |
1400 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
1401 | u32 index, int offset, struct bio *bio) | |
1402 | { | |
37b51fdd | 1403 | if (is_partial_io(bvec)) |
4e3c87b9 CH |
1404 | return zram_bvec_read_partial(zram, bvec, index, offset); |
1405 | return zram_read_page(zram, bvec->bv_page, index, bio); | |
924bd88d JM |
1406 | } |
1407 | ||
6aa4b839 | 1408 | static int zram_write_page(struct zram *zram, struct page *page, u32 index) |
306b0c95 | 1409 | { |
ae85a807 | 1410 | int ret = 0; |
1f7319c7 | 1411 | unsigned long alloced_pages; |
37887783 | 1412 | unsigned long handle = -ENOMEM; |
97ec7c8b MK |
1413 | unsigned int comp_len = 0; |
1414 | void *src, *dst, *mem; | |
1415 | struct zcomp_strm *zstrm; | |
97ec7c8b MK |
1416 | unsigned long element = 0; |
1417 | enum zram_pageflags flags = 0; | |
1418 | ||
73829b71 | 1419 | mem = kmap_local_page(page); |
97ec7c8b | 1420 | if (page_same_filled(mem, &element)) { |
73829b71 | 1421 | kunmap_local(mem); |
97ec7c8b MK |
1422 | /* Free memory associated with this sector now. */ |
1423 | flags = ZRAM_SAME; | |
1424 | atomic64_inc(&zram->stats.same_pages); | |
1425 | goto out; | |
1426 | } | |
73829b71 | 1427 | kunmap_local(mem); |
924bd88d | 1428 | |
37887783 | 1429 | compress_again: |
7ac07a26 | 1430 | zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); |
73829b71 | 1431 | src = kmap_local_page(page); |
97ec7c8b | 1432 | ret = zcomp_compress(zstrm, src, &comp_len); |
73829b71 | 1433 | kunmap_local(src); |
306b0c95 | 1434 | |
b7ca232e | 1435 | if (unlikely(ret)) { |
7ac07a26 | 1436 | zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); |
8c921b2b | 1437 | pr_err("Compression failed! err=%d\n", ret); |
37887783 | 1438 | zs_free(zram->mem_pool, handle); |
1f7319c7 | 1439 | return ret; |
8c921b2b | 1440 | } |
da9556a2 | 1441 | |
a939888e | 1442 | if (comp_len >= huge_class_size) |
89e85bce | 1443 | comp_len = PAGE_SIZE; |
37887783 JS |
1444 | /* |
1445 | * handle allocation has 2 paths: | |
1446 | * a) fast path is executed with preemption disabled (for | |
1447 | * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, | |
1448 | * since we can't sleep; | |
1449 | * b) slow path enables preemption and attempts to allocate | |
1450 | * the page with the __GFP_DIRECT_RECLAIM bit set. We have to |
1451 | * put the per-cpu compression stream and, thus, re-do |
1452 | * the compression once the handle is allocated. |
1453 | * | |
1454 | * if we have a 'non-null' handle here then we are coming | |
1455 | * from the slow path and handle has already been allocated. | |
1456 | */ | |
f24ee92c | 1457 | if (IS_ERR_VALUE(handle)) |
37887783 JS |
1458 | handle = zs_malloc(zram->mem_pool, comp_len, |
1459 | __GFP_KSWAPD_RECLAIM | | |
1460 | __GFP_NOWARN | | |
1461 | __GFP_HIGHMEM | | |
1462 | __GFP_MOVABLE); | |
f24ee92c | 1463 | if (IS_ERR_VALUE(handle)) { |
7ac07a26 | 1464 | zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); |
37887783 JS |
1465 | atomic64_inc(&zram->stats.writestall); |
1466 | handle = zs_malloc(zram->mem_pool, comp_len, | |
1467 | GFP_NOIO | __GFP_HIGHMEM | | |
1468 | __GFP_MOVABLE); | |
f24ee92c | 1469 | if (IS_ERR_VALUE(handle)) |
641608f3 AR |
1470 | return PTR_ERR((void *)handle); |
1471 | ||
1472 | if (comp_len != PAGE_SIZE) | |
37887783 | 1473 | goto compress_again; |
641608f3 | 1474 | /* |
f9bceb2f SS |
1475 | * If the page is not compressible, we need to re-acquire the |
1476 | * stream and execute the code below. The zcomp_stream_get() |
1477 | * call is needed to disable CPU hotplug and grab the per-CPU |
1478 | * zstrm buffer back, so that the dereference of the zstrm |
1479 | * variable below remains valid. |
641608f3 | 1480 | */ |
7ac07a26 | 1481 | zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); |
8c921b2b | 1482 | } |
9ada9da9 | 1483 | |
beb6602c | 1484 | alloced_pages = zs_get_total_pages(zram->mem_pool); |
12372755 SS |
1485 | update_used_max(zram, alloced_pages); |
1486 | ||
461a8eee | 1487 | if (zram->limit_pages && alloced_pages > zram->limit_pages) { |
7ac07a26 | 1488 | zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); |
beb6602c | 1489 | zs_free(zram->mem_pool, handle); |
1f7319c7 MK |
1490 | return -ENOMEM; |
1491 | } | |
1492 | ||
beb6602c | 1493 | dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); |
1f7319c7 MK |
1494 | |
1495 | src = zstrm->buffer; | |
1496 | if (comp_len == PAGE_SIZE) | |
73829b71 | 1497 | src = kmap_local_page(page); |
1f7319c7 MK |
1498 | memcpy(dst, src, comp_len); |
1499 | if (comp_len == PAGE_SIZE) | |
73829b71 | 1500 | kunmap_local(src); |
306b0c95 | 1501 | |
7ac07a26 | 1502 | zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); |
beb6602c | 1503 | zs_unmap_object(zram->mem_pool, handle); |
4ebbe7f7 MK |
1504 | atomic64_add(comp_len, &zram->stats.compr_data_size); |
1505 | out: | |
f40ac2ae SS |
1506 | /* |
1507 | * Free memory associated with this sector | |
1508 | * before overwriting unused sectors. | |
1509 | */ | |
86c49814 | 1510 | zram_slot_lock(zram, index); |
f40ac2ae | 1511 | zram_free_page(zram, index); |
db8ffbd4 | 1512 | |
89e85bce MK |
1513 | if (comp_len == PAGE_SIZE) { |
1514 | zram_set_flag(zram, index, ZRAM_HUGE); | |
1515 | atomic64_inc(&zram->stats.huge_pages); | |
194e28da | 1516 | atomic64_inc(&zram->stats.huge_pages_since); |
89e85bce MK |
1517 | } |
1518 | ||
db8ffbd4 MK |
1519 | if (flags) { |
1520 | zram_set_flag(zram, index, flags); | |
4ebbe7f7 | 1521 | zram_set_element(zram, index, element); |
db8ffbd4 | 1522 | } else { |
4ebbe7f7 MK |
1523 | zram_set_handle(zram, index, handle); |
1524 | zram_set_obj_size(zram, index, comp_len); | |
1525 | } | |
86c49814 | 1526 | zram_slot_unlock(zram, index); |
306b0c95 | 1527 | |
8c921b2b | 1528 | /* Update stats */ |
90a7806e | 1529 | atomic64_inc(&zram->stats.pages_stored); |
ae85a807 | 1530 | return ret; |
1f7319c7 MK |
1531 | } |
1532 | ||
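The two-path handle allocation above is subtle enough to deserve a standalone illustration. Below is a minimal userspace sketch of the same control flow; `stream_get()`/`stream_put()` and `pool_alloc()` are hypothetical stand-ins for zcomp_stream_get()/zcomp_stream_put() and zs_malloc() with/without __GFP_DIRECT_RECLAIM, not the kernel API.

```c
/*
 * Userspace sketch of zram's two-phase handle allocation.
 * stream_get()/stream_put() and pool_alloc() are hypothetical
 * stand-ins for zcomp_stream_get()/zcomp_stream_put() and
 * zs_malloc() with/without __GFP_DIRECT_RECLAIM.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void stream_get(void) { /* models taking the per-CPU stream */ }
static void stream_put(void) { /* models releasing it */ }

/* The first non-sleeping attempt fails, to simulate allocator pressure. */
static void *pool_alloc(size_t len, bool can_sleep)
{
	static int atomic_attempts;

	if (!can_sleep && atomic_attempts++ == 0)
		return NULL;
	return malloc(len);
}

static size_t compress(const char *page)
{
	(void)page;
	return 2048;		/* pretend the page halves in size */
}

static void *store_page(const char *page)
{
	void *handle = NULL;
	size_t comp_len;

compress_again:
	stream_get();			/* fast path: must not sleep now */
	comp_len = compress(page);
	if (!handle)
		handle = pool_alloc(comp_len, false);
	if (!handle) {
		stream_put();		/* drop the stream before sleeping */
		handle = pool_alloc(comp_len, true);
		if (!handle)
			return NULL;
		goto compress_again;	/* stream was lost: redo compression */
	}
	/* ... copy the compressed buffer into the object here ... */
	stream_put();
	return handle;
}

int main(void)
{
	char page[4096] = { 0 };

	printf("stored at %p\n", store_page(page));
	return 0;
}
```

The key invariant is that the allocator may only sleep after the per-CPU stream has been put, which is why the slow path must jump back and compress the page a second time.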
a0b81ae7 CH |
1533 | /* |
1534 | * This is a partial IO. Read the full page before writing the changes. | |
1535 | */ | |
1536 | static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec, | |
1537 | u32 index, int offset, struct bio *bio) | |
1f7319c7 | 1538 | { |
a0b81ae7 | 1539 | struct page *page = alloc_page(GFP_NOIO); |
1f7319c7 | 1540 | int ret; |
1f7319c7 | 1541 | |
a0b81ae7 CH |
1542 | if (!page) |
1543 | return -ENOMEM; | |
1f7319c7 | 1544 | |
4e3c87b9 | 1545 | ret = zram_read_page(zram, page, index, bio); |
a0b81ae7 | 1546 | if (!ret) { |
f575a5ad | 1547 | memcpy_from_bvec(page_address(page) + offset, bvec); |
a0b81ae7 | 1548 | ret = zram_write_page(zram, page, index); |
1f7319c7 | 1549 | } |
a0b81ae7 CH |
1550 | __free_page(page); |
1551 | return ret; | |
1552 | } | |
1f7319c7 | 1553 | |
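zram_bvec_write_partial() above is the classic read-modify-write pattern: fetch the whole page, patch only the written window, store the whole page back. A self-contained sketch, with read_page()/write_page() as hypothetical stand-ins for the zram page I/O helpers:

```c
/*
 * Read-modify-write sketch for sub-page writes, as in
 * zram_bvec_write_partial(). read_page()/write_page() are
 * hypothetical stand-ins for the zram page I/O helpers.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static char backing[PAGE_SIZE];			/* fake one-page store */

static void read_page(char *dst)  { memcpy(dst, backing, PAGE_SIZE); }
static void write_page(const char *src) { memcpy(backing, src, PAGE_SIZE); }

/* Overwrite len bytes at offset without losing the rest of the page. */
static void partial_write(const char *buf, size_t offset, size_t len)
{
	char page[PAGE_SIZE];

	read_page(page);			/* fetch the full page */
	memcpy(page + offset, buf, len);	/* patch the written window */
	write_page(page);			/* store the full page back */
}

int main(void)
{
	partial_write("hello", 100, 5);
	printf("%.5s\n", backing + 100);	/* -> hello */
	return 0;
}
```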
a0b81ae7 CH |
1554 | static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, |
1555 | u32 index, int offset, struct bio *bio) | |
1556 | { | |
397c6066 | 1557 | if (is_partial_io(bvec)) |
a0b81ae7 CH |
1558 | return zram_bvec_write_partial(zram, bvec, index, offset, bio); |
1559 | return zram_write_page(zram, bvec->bv_page, index); | |
8c921b2b JM |
1560 | } |
1561 | ||
84b33bf7 SS |
1562 | #ifdef CONFIG_ZRAM_MULTI_COMP |
1563 | /* | |
1564 | * This function will decompress the page (unless it's ZRAM_HUGE) and then
1565 | * attempt to compress it using the provided compression algorithm priority
1566 | * (which is potentially more effective).
1567 | * | |
1568 | * Corresponding ZRAM slot should be locked. | |
1569 | */ | |
1570 | static int zram_recompress(struct zram *zram, u32 index, struct page *page, | |
1571 | u32 threshold, u32 prio, u32 prio_max) | |
1572 | { | |
1573 | struct zcomp_strm *zstrm = NULL; | |
1574 | unsigned long handle_old; | |
1575 | unsigned long handle_new; | |
1576 | unsigned int comp_len_old; | |
1577 | unsigned int comp_len_new; | |
7c2af309 AR |
1578 | unsigned int class_index_old; |
1579 | unsigned int class_index_new; | |
a55cf964 | 1580 | u32 num_recomps = 0; |
84b33bf7 SS |
1581 | void *src, *dst; |
1582 | int ret; | |
1583 | ||
1584 | handle_old = zram_get_handle(zram, index); | |
1585 | if (!handle_old) | |
1586 | return -EINVAL; | |
1587 | ||
1588 | comp_len_old = zram_get_obj_size(zram, index); | |
1589 | /* | |
1590 | * Do not recompress objects that are already "small enough". | |
1591 | */ | |
1592 | if (comp_len_old < threshold) | |
1593 | return 0; | |
1594 | ||
1595 | ret = zram_read_from_zspool(zram, page, index); | |
1596 | if (ret) | |
1597 | return ret; | |
1598 | ||
7c2af309 | 1599 | class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); |
84b33bf7 SS |
1600 | /* |
1601 | * Iterate the secondary comp algorithms list (in order of priority) | |
1602 | * and try to recompress the page. | |
1603 | */ | |
1604 | for (; prio < prio_max; prio++) { | |
1605 | if (!zram->comps[prio]) | |
1606 | continue; | |
1607 | ||
1608 | /* | |
1609 | * Skip if the object is already re-compressed with a higher | |
1610 | * priority algorithm (or same algorithm). | |
1611 | */ | |
1612 | if (prio <= zram_get_priority(zram, index)) | |
1613 | continue; | |
1614 | ||
a55cf964 | 1615 | num_recomps++; |
84b33bf7 | 1616 | zstrm = zcomp_stream_get(zram->comps[prio]); |
73829b71 | 1617 | src = kmap_local_page(page); |
84b33bf7 | 1618 | ret = zcomp_compress(zstrm, src, &comp_len_new); |
73829b71 | 1619 | kunmap_local(src); |
84b33bf7 SS |
1620 | |
1621 | if (ret) { | |
1622 | zcomp_stream_put(zram->comps[prio]); | |
1623 | return ret; | |
1624 | } | |
1625 | ||
7c2af309 AR |
1626 | class_index_new = zs_lookup_class_index(zram->mem_pool, |
1627 | comp_len_new); | |
1628 | ||
84b33bf7 | 1629 | /* Continue until we make progress */ |
4942cf6a | 1630 | if (class_index_new >= class_index_old || |
84b33bf7 SS |
1631 | (threshold && comp_len_new >= threshold)) { |
1632 | zcomp_stream_put(zram->comps[prio]); | |
1633 | continue; | |
1634 | } | |
1635 | ||
1636 | /* Recompression was successful so break out */ | |
1637 | break; | |
1638 | } | |
1639 | ||
1640 | /* | |
1641 | * We did not try to recompress, e.g. when we have only one | |
1642 | * secondary algorithm and the page is already recompressed | |
1643 | * using that algorithm.
1644 | */ | |
1645 | if (!zstrm) | |
1646 | return 0; | |
1647 | ||
4942cf6a | 1648 | if (class_index_new >= class_index_old) { |
a55cf964 SS |
1649 | /* |
1650 | * Secondary algorithms failed to re-compress the page | |
1651 | * in a way that would save memory; mark the object as
1652 | * incompressible so that we will not try to compress | |
1653 | * it again. | |
1654 | * | |
1655 | * We need to make sure that all secondary algorithms have | |
1656 | * failed, so we test if the number of recompressions matches | |
1657 | * the number of active secondary algorithms. | |
1658 | */ | |
1659 | if (num_recomps == zram->num_active_comps - 1) | |
1660 | zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); | |
84b33bf7 SS |
1661 | return 0; |
1662 | } | |
1663 | ||
1664 | /* Successful recompression but above threshold */ | |
1665 | if (threshold && comp_len_new >= threshold) | |
1666 | return 0; | |
1667 | ||
1668 | /* | |
1669 | * No direct reclaim (slow path) for handle allocation and no | |
6aa4b839 | 1670 | * re-compression attempt (unlike in zram_write_bvec()) since |
84b33bf7 SS |
1671 | * we already have stored that object in zsmalloc. If we cannot |
1672 | * alloc memory for recompressed object then we bail out and | |
1673 | * simply keep the old (existing) object in zsmalloc. | |
1674 | */ | |
1675 | handle_new = zs_malloc(zram->mem_pool, comp_len_new, | |
1676 | __GFP_KSWAPD_RECLAIM | | |
1677 | __GFP_NOWARN | | |
1678 | __GFP_HIGHMEM | | |
1679 | __GFP_MOVABLE); | |
1680 | if (IS_ERR_VALUE(handle_new)) { | |
1681 | zcomp_stream_put(zram->comps[prio]); | |
1682 | return PTR_ERR((void *)handle_new); | |
1683 | } | |
1684 | ||
1685 | dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO); | |
1686 | memcpy(dst, zstrm->buffer, comp_len_new); | |
1687 | zcomp_stream_put(zram->comps[prio]); | |
1688 | ||
1689 | zs_unmap_object(zram->mem_pool, handle_new); | |
1690 | ||
1691 | zram_free_page(zram, index); | |
1692 | zram_set_handle(zram, index, handle_new); | |
1693 | zram_set_obj_size(zram, index, comp_len_new); | |
1694 | zram_set_priority(zram, index, prio); | |
1695 | ||
1696 | atomic64_add(comp_len_new, &zram->stats.compr_data_size); | |
1697 | atomic64_inc(&zram->stats.pages_stored); | |
1698 | ||
1699 | return 0; | |
1700 | } | |
1701 | ||
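The reason zram_recompress() compares zsmalloc size classes rather than raw lengths: zsmalloc rounds every object up to its size class, so a shorter compressed buffer saves memory only if it lands in a smaller class. The sketch below assumes a hypothetical uniform 64-byte class granularity purely for illustration; real zsmalloc class sizes are computed differently.

```c
/*
 * Why zram_recompress() compares size classes rather than lengths:
 * a smaller comp_len frees memory only if it maps to a smaller
 * zsmalloc size class. class_of() assumes a hypothetical uniform
 * 64-byte granularity, purely for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int class_of(unsigned int len)
{
	return (len + 63) / 64;		/* round up to a 64-byte class */
}

static bool recompress_helps(unsigned int old_len, unsigned int new_len)
{
	return class_of(new_len) < class_of(old_len);
}

int main(void)
{
	/* 3000 -> 2980 stays in the same class: nothing is saved */
	printf("%d\n", recompress_helps(3000, 2980));	/* 0 */
	/* 3000 -> 2500 drops to a smaller class: worth re-storing */
	printf("%d\n", recompress_helps(3000, 2500));	/* 1 */
	return 0;
}
```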
1702 | #define RECOMPRESS_IDLE (1 << 0) | |
1703 | #define RECOMPRESS_HUGE (1 << 1) | |
1704 | ||
1705 | static ssize_t recompress_store(struct device *dev, | |
1706 | struct device_attribute *attr, | |
1707 | const char *buf, size_t len) | |
1708 | { | |
a55cf964 | 1709 | u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; |
84b33bf7 | 1710 | struct zram *zram = dev_to_zram(dev); |
84b33bf7 | 1711 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
a55cf964 SS |
1712 | char *args, *param, *val, *algo = NULL; |
1713 | u32 mode = 0, threshold = 0; | |
84b33bf7 SS |
1714 | unsigned long index; |
1715 | struct page *page; | |
1716 | ssize_t ret; | |
1717 | ||
1718 | args = skip_spaces(buf); | |
1719 | while (*args) { | |
1720 | args = next_arg(args, ¶m, &val); | |
1721 | ||
df32de14 | 1722 | if (!val || !*val) |
84b33bf7 SS |
1723 | return -EINVAL; |
1724 | ||
1725 | if (!strcmp(param, "type")) { | |
1726 | if (!strcmp(val, "idle")) | |
1727 | mode = RECOMPRESS_IDLE; | |
1728 | if (!strcmp(val, "huge")) | |
1729 | mode = RECOMPRESS_HUGE; | |
1730 | if (!strcmp(val, "huge_idle")) | |
1731 | mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE; | |
1732 | continue; | |
1733 | } | |
1734 | ||
1735 | if (!strcmp(param, "threshold")) { | |
1736 | /* | |
1737 | * We will re-compress only idle objects equal to or
1738 | * greater in size than the watermark.
1739 | */ | |
1740 | ret = kstrtouint(val, 10, &threshold); | |
1741 | if (ret) | |
1742 | return ret; | |
1743 | continue; | |
1744 | } | |
a55cf964 SS |
1745 | |
1746 | if (!strcmp(param, "algo")) { | |
1747 | algo = val; | |
1748 | continue; | |
1749 | } | |
84b33bf7 SS |
1750 | } |
1751 | ||
cb0551ad | 1752 | if (threshold >= huge_class_size) |
84b33bf7 SS |
1753 | return -EINVAL; |
1754 | ||
1755 | down_read(&zram->init_lock); | |
1756 | if (!init_done(zram)) { | |
1757 | ret = -EINVAL; | |
1758 | goto release_init_lock; | |
1759 | } | |
1760 | ||
a55cf964 SS |
1761 | if (algo) { |
1762 | bool found = false; | |
1763 | ||
1764 | for (; prio < ZRAM_MAX_COMPS; prio++) { | |
1765 | if (!zram->comp_algs[prio]) | |
1766 | continue; | |
1767 | ||
1768 | if (!strcmp(zram->comp_algs[prio], algo)) { | |
1769 | prio_max = min(prio + 1, ZRAM_MAX_COMPS); | |
1770 | found = true; | |
1771 | break; | |
1772 | } | |
1773 | } | |
1774 | ||
1775 | if (!found) { | |
1776 | ret = -EINVAL; | |
1777 | goto release_init_lock; | |
1778 | } | |
1779 | } | |
1780 | ||
84b33bf7 SS |
1781 | page = alloc_page(GFP_KERNEL); |
1782 | if (!page) { | |
1783 | ret = -ENOMEM; | |
1784 | goto release_init_lock; | |
1785 | } | |
1786 | ||
1787 | ret = len; | |
1788 | for (index = 0; index < nr_pages; index++) { | |
1789 | int err = 0; | |
1790 | ||
1791 | zram_slot_lock(zram, index); | |
1792 | ||
1793 | if (!zram_allocated(zram, index)) | |
1794 | goto next; | |
1795 | ||
1796 | if (mode & RECOMPRESS_IDLE && | |
1797 | !zram_test_flag(zram, index, ZRAM_IDLE)) | |
1798 | goto next; | |
1799 | ||
1800 | if (mode & RECOMPRESS_HUGE && | |
1801 | !zram_test_flag(zram, index, ZRAM_HUGE)) | |
1802 | goto next; | |
1803 | ||
1804 | if (zram_test_flag(zram, index, ZRAM_WB) || | |
1805 | zram_test_flag(zram, index, ZRAM_UNDER_WB) || | |
1806 | zram_test_flag(zram, index, ZRAM_SAME) || | |
1807 | zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) | |
1808 | goto next; | |
1809 | ||
1810 | err = zram_recompress(zram, index, page, threshold, | |
a55cf964 | 1811 | prio, prio_max); |
84b33bf7 SS |
1812 | next: |
1813 | zram_slot_unlock(zram, index); | |
1814 | if (err) { | |
1815 | ret = err; | |
1816 | break; | |
1817 | } | |
1818 | ||
1819 | cond_resched(); | |
1820 | } | |
1821 | ||
1822 | __free_page(page); | |
1823 | ||
1824 | release_init_lock: | |
1825 | up_read(&zram->init_lock); | |
1826 | return ret; | |
1827 | } | |
1828 | #endif | |
1829 | ||
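For reference, recompress_store() above is driven by writes such as `type=huge_idle threshold=3000 algo=zstd` to the sysfs attribute. A rough userspace approximation of its next_arg() loop, using strtok()/strchr(); the kernel helper also handles quoted values, which this sketch omits:

```c
/*
 * Userspace approximation of the "param=value" parsing that
 * recompress_store() performs with next_arg(). Quoting support
 * is omitted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "type=huge_idle threshold=3000 algo=zstd";
	unsigned int threshold = 0;
	char *type = NULL, *algo = NULL;

	for (char *tok = strtok(buf, " "); tok; tok = strtok(NULL, " ")) {
		char *val = strchr(tok, '=');

		if (!val || !val[1])
			return 1;	/* the driver returns -EINVAL */
		*val++ = '\0';

		if (!strcmp(tok, "type"))
			type = val;
		else if (!strcmp(tok, "threshold"))
			threshold = (unsigned int)strtoul(val, NULL, 10);
		else if (!strcmp(tok, "algo"))
			algo = val;
	}
	printf("type=%s threshold=%u algo=%s\n", type, threshold, algo);
	return 0;
}
```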
0120dd6e | 1830 | static void zram_bio_discard(struct zram *zram, struct bio *bio) |
f4659d8e JK |
1831 | { |
1832 | size_t n = bio->bi_iter.bi_size; | |
0120dd6e CH |
1833 | u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
1834 | u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) << | |
1835 | SECTOR_SHIFT; | |
f4659d8e JK |
1836 | |
1837 | /* | |
1838 | * zram manages data in physical block size units. Because logical block
1839 | * size isn't identical to physical block size on some arch, we
1840 | * could get a discard request pointing to a specific offset within a
1841 | * certain physical block. Although we can handle this request by
1842 | * reading that physical block and decompressing, partially zeroing
1843 | * and re-compressing and then re-storing it, this isn't reasonable
1844 | * because our intent with a discard request is to save memory. So
1845 | * skipping this logical block is appropriate here.
1846 | */ | |
1847 | if (offset) { | |
38515c73 | 1848 | if (n <= (PAGE_SIZE - offset)) |
f4659d8e JK |
1849 | return; |
1850 | ||
38515c73 | 1851 | n -= (PAGE_SIZE - offset); |
f4659d8e JK |
1852 | index++; |
1853 | } | |
1854 | ||
1855 | while (n >= PAGE_SIZE) { | |
86c49814 | 1856 | zram_slot_lock(zram, index); |
f4659d8e | 1857 | zram_free_page(zram, index); |
86c49814 | 1858 | zram_slot_unlock(zram, index); |
015254da | 1859 | atomic64_inc(&zram->stats.notify_free); |
f4659d8e JK |
1860 | index++; |
1861 | n -= PAGE_SIZE; | |
1862 | } | |
0120dd6e CH |
1863 | |
1864 | bio_endio(bio); | |
f4659d8e JK |
1865 | } |
1866 | ||
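The index/offset arithmetic in zram_bio_discard() is easy to check in isolation: with 512-byte sectors and 4 KiB pages there are eight sectors per page, an unaligned head is skipped, and only whole pages are freed. A runnable sketch (the example sector and length are arbitrary):

```c
/*
 * Arithmetic of zram_bio_discard(): 512-byte sectors, 4 KiB pages,
 * so eight sectors per page. An unaligned head is skipped (discard
 * must not zero partial pages), then whole pages are freed.
 */
#include <stdio.h>

#define SECTOR_SHIFT		9
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ul << PAGE_SHIFT)
#define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE	(1ul << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
	unsigned long long sector = 11;		/* start of the request */
	unsigned long n = 3 * PAGE_SIZE;	/* request length in bytes */

	unsigned long index = sector >> SECTORS_PER_PAGE_SHIFT;
	unsigned long offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (offset) {				/* skip the unaligned head */
		if (n <= PAGE_SIZE - offset)
			return 0;		/* nothing discardable */
		n -= PAGE_SIZE - offset;
		index++;
	}
	while (n >= PAGE_SIZE) {		/* free whole pages only */
		printf("free page %lu\n", index);
		index++;
		n -= PAGE_SIZE;
	}
	return 0;
}
```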
82ca875d | 1867 | static void zram_bio_read(struct zram *zram, struct bio *bio) |
9b3bb7ab | 1868 | { |
95848dcb CH |
1869 | unsigned long start_time = bio_start_io_acct(bio); |
1870 | struct bvec_iter iter = bio->bi_iter; | |
9b3bb7ab | 1871 | |
95848dcb | 1872 | do { |
82ca875d CH |
1873 | u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
1874 | u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << | |
1875 | SECTOR_SHIFT; | |
95848dcb CH |
1876 | struct bio_vec bv = bio_iter_iovec(bio, iter); |
1877 | ||
1878 | bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); | |
d7eac6b6 | 1879 | |
82ca875d | 1880 | if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) { |
522698d7 | 1881 | atomic64_inc(&zram->stats.failed_reads); |
82ca875d CH |
1882 | bio->bi_status = BLK_STS_IOERR; |
1883 | break; | |
57de7bd8 | 1884 | } |
82ca875d | 1885 | flush_dcache_page(bv.bv_page); |
9b3bb7ab | 1886 | |
82ca875d CH |
1887 | zram_slot_lock(zram, index); |
1888 | zram_accessed(zram, index); | |
1889 | zram_slot_unlock(zram, index); | |
95848dcb CH |
1890 | |
1891 | bio_advance_iter_single(bio, &iter, bv.bv_len); | |
1892 | } while (iter.bi_size); | |
1893 | ||
82ca875d CH |
1894 | bio_end_io_acct(bio, start_time); |
1895 | bio_endio(bio); | |
8c921b2b JM |
1896 | } |
1897 | ||
82ca875d | 1898 | static void zram_bio_write(struct zram *zram, struct bio *bio) |
8c921b2b | 1899 | { |
95848dcb CH |
1900 | unsigned long start_time = bio_start_io_acct(bio); |
1901 | struct bvec_iter iter = bio->bi_iter; | |
8c921b2b | 1902 | |
95848dcb | 1903 | do { |
af8b04c6 CH |
1904 | u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
1905 | u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << | |
1906 | SECTOR_SHIFT; | |
95848dcb CH |
1907 | struct bio_vec bv = bio_iter_iovec(bio, iter); |
1908 | ||
1909 | bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); | |
924bd88d | 1910 | |
82ca875d CH |
1911 | if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) { |
1912 | atomic64_inc(&zram->stats.failed_writes); | |
af8b04c6 CH |
1913 | bio->bi_status = BLK_STS_IOERR; |
1914 | break; | |
1915 | } | |
924bd88d | 1916 | |
82ca875d CH |
1917 | zram_slot_lock(zram, index); |
1918 | zram_accessed(zram, index); | |
1919 | zram_slot_unlock(zram, index); | |
95848dcb CH |
1920 | |
1921 | bio_advance_iter_single(bio, &iter, bv.bv_len); | |
1922 | } while (iter.bi_size); | |
1923 | ||
d7614e44 | 1924 | bio_end_io_acct(bio, start_time); |
4246a0b6 | 1925 | bio_endio(bio); |
306b0c95 NG |
1926 | } |
1927 | ||
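Both zram_bio_read() and zram_bio_write() above advance through the bio in chunks clamped to `min(remaining, PAGE_SIZE - offset)`, so no chunk ever straddles a device page. A pure-arithmetic sketch of that chunking (the starting sector and payload size are arbitrary example values):

```c
/*
 * The chunking rule in zram_bio_read()/zram_bio_write(): each
 * iteration handles min(remaining, PAGE_SIZE - offset) bytes,
 * so no chunk crosses a device page boundary.
 */
#include <stdio.h>

#define SECTOR_SHIFT		9
#define PAGE_SIZE		4096u
#define SECTORS_PER_PAGE_SHIFT	3
#define SECTORS_PER_PAGE	8u

int main(void)
{
	unsigned long long sector = 5;		/* unaligned start */
	unsigned int remaining = 10240;		/* bio payload: 20 sectors */

	while (remaining) {
		unsigned long index = sector >> SECTORS_PER_PAGE_SHIFT;
		unsigned int offset =
			(sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
		unsigned int len = remaining;

		if (len > PAGE_SIZE - offset)	/* clamp to page boundary */
			len = PAGE_SIZE - offset;

		printf("page %lu, offset %u, len %u\n", index, offset, len);
		sector += len >> SECTOR_SHIFT;
		remaining -= len;
	}
	return 0;
}
```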
306b0c95 | 1928 | /* |
f1e3cfff | 1929 | * Handler function for all zram I/O requests. |
306b0c95 | 1930 | */ |
3e08773c | 1931 | static void zram_submit_bio(struct bio *bio) |
306b0c95 | 1932 | { |
309dca30 | 1933 | struct zram *zram = bio->bi_bdev->bd_disk->private_data; |
306b0c95 | 1934 | |
d6eea009 CH |
1935 | switch (bio_op(bio)) { |
1936 | case REQ_OP_READ: | |
82ca875d CH |
1937 | zram_bio_read(zram, bio); |
1938 | break; | |
d6eea009 | 1939 | case REQ_OP_WRITE: |
82ca875d | 1940 | zram_bio_write(zram, bio); |
d6eea009 CH |
1941 | break; |
1942 | case REQ_OP_DISCARD: | |
1943 | case REQ_OP_WRITE_ZEROES: | |
1944 | zram_bio_discard(zram, bio); | |
1945 | break; | |
1946 | default: | |
1947 | WARN_ON_ONCE(1); | |
1948 | bio_endio(bio); | |
6642a67c | 1949 | } |
306b0c95 NG |
1950 | } |
1951 | ||
2ccbec05 NG |
1952 | static void zram_slot_free_notify(struct block_device *bdev, |
1953 | unsigned long index) | |
107c161b | 1954 | { |
f1e3cfff | 1955 | struct zram *zram; |
107c161b | 1956 | |
f1e3cfff | 1957 | zram = bdev->bd_disk->private_data; |
a0c516cb | 1958 | |
3c9959e0 MK |
1959 | atomic64_inc(&zram->stats.notify_free); |
1960 | if (!zram_slot_trylock(zram, index)) { | |
1961 | atomic64_inc(&zram->stats.miss_free); | |
1962 | return; | |
1963 | } | |
1964 | ||
f614a9f4 | 1965 | zram_free_page(zram, index); |
86c49814 | 1966 | zram_slot_unlock(zram, index); |
107c161b NG |
1967 | } |
1968 | ||
7ac07a26 SS |
1969 | static void zram_destroy_comps(struct zram *zram) |
1970 | { | |
1971 | u32 prio; | |
1972 | ||
1973 | for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { | |
1974 | struct zcomp *comp = zram->comps[prio]; | |
1975 | ||
1976 | zram->comps[prio] = NULL; | |
1977 | if (!comp) | |
1978 | continue; | |
1979 | zcomp_destroy(comp); | |
a55cf964 | 1980 | zram->num_active_comps--; |
7ac07a26 SS |
1981 | } |
1982 | } | |
1983 | ||
522698d7 SS |
1984 | static void zram_reset_device(struct zram *zram) |
1985 | { | |
522698d7 | 1986 | down_write(&zram->init_lock); |
9b3bb7ab | 1987 | |
522698d7 SS |
1988 | zram->limit_pages = 0; |
1989 | ||
1990 | if (!init_done(zram)) { | |
1991 | up_write(&zram->init_lock); | |
1992 | return; | |
1993 | } | |
1994 | ||
6e017a39 | 1995 | set_capacity_and_notify(zram->disk, 0); |
8446fe92 | 1996 | part_stat_set_all(zram->disk->part0, 0); |
522698d7 | 1997 | |
522698d7 | 1998 | /* All pending I/O on every CPU is done, so it's safe to free */
6d2453c3 SS |
1999 | zram_meta_free(zram, zram->disksize); |
2000 | zram->disksize = 0; | |
7ac07a26 | 2001 | zram_destroy_comps(zram); |
302128dc | 2002 | memset(&zram->stats, 0, sizeof(zram->stats)); |
013bf95a | 2003 | reset_bdev(zram); |
6f163779 | 2004 | |
7ac07a26 | 2005 | comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); |
6f163779 | 2006 | up_write(&zram->init_lock); |
522698d7 SS |
2007 | } |
2008 | ||
2009 | static ssize_t disksize_store(struct device *dev, | |
2010 | struct device_attribute *attr, const char *buf, size_t len) | |
2f6a3bed | 2011 | { |
522698d7 SS |
2012 | u64 disksize; |
2013 | struct zcomp *comp; | |
2f6a3bed | 2014 | struct zram *zram = dev_to_zram(dev); |
522698d7 | 2015 | int err; |
7ac07a26 | 2016 | u32 prio; |
2f6a3bed | 2017 | |
522698d7 SS |
2018 | disksize = memparse(buf, NULL); |
2019 | if (!disksize) | |
2020 | return -EINVAL; | |
2f6a3bed | 2021 | |
beb6602c MK |
2022 | down_write(&zram->init_lock); |
2023 | if (init_done(zram)) { | |
2024 | pr_info("Cannot change disksize for initialized device\n"); | |
2025 | err = -EBUSY; | |
2026 | goto out_unlock; | |
2027 | } | |
2028 | ||
522698d7 | 2029 | disksize = PAGE_ALIGN(disksize); |
beb6602c MK |
2030 | if (!zram_meta_alloc(zram, disksize)) { |
2031 | err = -ENOMEM; | |
2032 | goto out_unlock; | |
2033 | } | |
522698d7 | 2034 | |
7ac07a26 SS |
2035 | for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { |
2036 | if (!zram->comp_algs[prio]) | |
2037 | continue; | |
2038 | ||
2039 | comp = zcomp_create(zram->comp_algs[prio]); | |
2040 | if (IS_ERR(comp)) { | |
2041 | pr_err("Cannot initialise %s compressing backend\n", | |
2042 | zram->comp_algs[prio]); | |
2043 | err = PTR_ERR(comp); | |
2044 | goto out_free_comps; | |
2045 | } | |
522698d7 | 2046 | |
7ac07a26 | 2047 | zram->comps[prio] = comp; |
a55cf964 | 2048 | zram->num_active_comps++; |
7ac07a26 | 2049 | } |
522698d7 | 2050 | zram->disksize = disksize; |
6e017a39 | 2051 | set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT); |
e7ccfc4c | 2052 | up_write(&zram->init_lock); |
522698d7 SS |
2053 | |
2054 | return len; | |
2055 | ||
7ac07a26 SS |
2056 | out_free_comps: |
2057 | zram_destroy_comps(zram); | |
beb6602c MK |
2058 | zram_meta_free(zram, disksize); |
2059 | out_unlock: | |
2060 | up_write(&zram->init_lock); | |
522698d7 | 2061 | return err; |
2f6a3bed SS |
2062 | } |
2063 | ||
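disksize_store() accepts human-readable sizes (e.g. `echo 512M > /sys/block/zram0/disksize`) via memparse() and rounds the result up to a page boundary. A rough userspace approximation of that parsing; the real memparse() accepts more suffixes (T, P, E) than the K/M/G handled here:

```c
/*
 * Approximation of memparse() + PAGE_ALIGN() as used by
 * disksize_store(). Only the K/M/G suffixes are handled.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096ull
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 30; break;
	case 'M': case 'm': v <<= 20; break;
	case 'K': case 'k': v <<= 10; break;
	}
	return v;
}

int main(void)
{
	/* "512M" -> 536870912, already page aligned */
	printf("%llu\n", PAGE_ALIGN(parse_size("512M")));
	/* odd sizes get rounded up to a whole page */
	printf("%llu\n", PAGE_ALIGN(parse_size("10000")));	/* -> 12288 */
	return 0;
}
```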
522698d7 SS |
2064 | static ssize_t reset_store(struct device *dev, |
2065 | struct device_attribute *attr, const char *buf, size_t len) | |
4f2109f6 | 2066 | { |
522698d7 SS |
2067 | int ret; |
2068 | unsigned short do_reset; | |
2069 | struct zram *zram; | |
d666e20e | 2070 | struct gendisk *disk; |
4f2109f6 | 2071 | |
f405c445 SS |
2072 | ret = kstrtou16(buf, 10, &do_reset); |
2073 | if (ret) | |
2074 | return ret; | |
2075 | ||
2076 | if (!do_reset) | |
2077 | return -EINVAL; | |
2078 | ||
522698d7 | 2079 | zram = dev_to_zram(dev); |
d666e20e | 2080 | disk = zram->disk; |
4f2109f6 | 2081 | |
d666e20e | 2082 | mutex_lock(&disk->open_mutex); |
f405c445 | 2083 | /* Do not reset an active device or claimed device */ |
dbdc1be3 | 2084 | if (disk_openers(disk) || zram->claim) { |
d666e20e | 2085 | mutex_unlock(&disk->open_mutex); |
f405c445 | 2086 | return -EBUSY; |
522698d7 SS |
2087 | } |
2088 | ||
f405c445 SS |
2089 | /* From now on, no one can open /dev/zram[0-9] */
2090 | zram->claim = true; | |
d666e20e | 2091 | mutex_unlock(&disk->open_mutex); |
522698d7 | 2092 | |
f405c445 | 2093 | /* Make sure all the pending I/O are finished */ |
d666e20e | 2094 | sync_blockdev(disk->part0); |
522698d7 | 2095 | zram_reset_device(zram); |
522698d7 | 2096 | |
d666e20e | 2097 | mutex_lock(&disk->open_mutex); |
f405c445 | 2098 | zram->claim = false; |
d666e20e | 2099 | mutex_unlock(&disk->open_mutex); |
f405c445 | 2100 | |
522698d7 | 2101 | return len; |
f405c445 SS |
2102 | } |
2103 | ||
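reset_store() above and zram_open() below coordinate through the `claim` flag under the disk's open_mutex: reset bails out if the device has openers, otherwise it sets `claim` so that new opens fail with -EBUSY until the reset finishes. A small userspace model of that hand-off (the names and the pthread mutex are stand-ins, not the kernel locking):

```c
/*
 * Sketch of the claim/open hand-off used by reset_store() and
 * zram_open(): open fails while the device is claimed, and reset
 * fails while the device is open. Hypothetical userspace model.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
static int openers;
static bool claim;

static int dev_open(void)
{
	pthread_mutex_lock(&open_mutex);
	int ret = claim ? -16 /* -EBUSY */ : (openers++, 0);
	pthread_mutex_unlock(&open_mutex);
	return ret;
}

static int dev_reset(void)
{
	pthread_mutex_lock(&open_mutex);
	if (openers || claim) {
		pthread_mutex_unlock(&open_mutex);
		return -16;			/* -EBUSY */
	}
	claim = true;				/* block new opens */
	pthread_mutex_unlock(&open_mutex);

	/* ... sync + the actual reset run without the mutex held ... */

	pthread_mutex_lock(&open_mutex);
	claim = false;
	pthread_mutex_unlock(&open_mutex);
	return 0;
}

int main(void)
{
	printf("reset: %d\n", dev_reset());	/* 0 */
	printf("open:  %d\n", dev_open());	/* 0 */
	printf("reset: %d\n", dev_reset());	/* -16, device is open */
	return 0;
}
```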
05bdb996 | 2104 | static int zram_open(struct gendisk *disk, blk_mode_t mode) |
f405c445 | 2105 | { |
d32e2bf8 | 2106 | struct zram *zram = disk->private_data; |
f405c445 | 2107 | |
d32e2bf8 | 2108 | WARN_ON(!mutex_is_locked(&disk->open_mutex)); |
f405c445 | 2109 | |
f405c445 SS |
2110 | /* zram was claimed for reset, so the open request fails */
2111 | if (zram->claim) | |
d32e2bf8 CH |
2112 | return -EBUSY; |
2113 | return 0; | |
4f2109f6 SS |
2114 | } |
2115 | ||
522698d7 | 2116 | static const struct block_device_operations zram_devops = { |
f405c445 | 2117 | .open = zram_open, |
c62b37d9 | 2118 | .submit_bio = zram_submit_bio, |
522698d7 | 2119 | .swap_slot_free_notify = zram_slot_free_notify, |
522698d7 SS |
2120 | .owner = THIS_MODULE |
2121 | }; | |
2122 | ||
2123 | static DEVICE_ATTR_WO(compact); | |
2124 | static DEVICE_ATTR_RW(disksize); | |
2125 | static DEVICE_ATTR_RO(initstate); | |
2126 | static DEVICE_ATTR_WO(reset); | |
c87d1655 SS |
2127 | static DEVICE_ATTR_WO(mem_limit); |
2128 | static DEVICE_ATTR_WO(mem_used_max); | |
e82592c4 | 2129 | static DEVICE_ATTR_WO(idle); |
522698d7 SS |
2130 | static DEVICE_ATTR_RW(max_comp_streams); |
2131 | static DEVICE_ATTR_RW(comp_algorithm); | |
013bf95a MK |
2132 | #ifdef CONFIG_ZRAM_WRITEBACK |
2133 | static DEVICE_ATTR_RW(backing_dev); | |
a939888e | 2134 | static DEVICE_ATTR_WO(writeback); |
bb416d18 | 2135 | static DEVICE_ATTR_RW(writeback_limit); |
1d69a3f8 | 2136 | static DEVICE_ATTR_RW(writeback_limit_enable); |
013bf95a | 2137 | #endif |
001d9273 SS |
2138 | #ifdef CONFIG_ZRAM_MULTI_COMP |
2139 | static DEVICE_ATTR_RW(recomp_algorithm); | |
84b33bf7 | 2140 | static DEVICE_ATTR_WO(recompress); |
001d9273 | 2141 | #endif |
a68eb3b6 | 2142 | |
9b3bb7ab SS |
2143 | static struct attribute *zram_disk_attrs[] = { |
2144 | &dev_attr_disksize.attr, | |
2145 | &dev_attr_initstate.attr, | |
2146 | &dev_attr_reset.attr, | |
99ebbd30 | 2147 | &dev_attr_compact.attr, |
9ada9da9 | 2148 | &dev_attr_mem_limit.attr, |
461a8eee | 2149 | &dev_attr_mem_used_max.attr, |
e82592c4 | 2150 | &dev_attr_idle.attr, |
beca3ec7 | 2151 | &dev_attr_max_comp_streams.attr, |
e46b8a03 | 2152 | &dev_attr_comp_algorithm.attr, |
013bf95a MK |
2153 | #ifdef CONFIG_ZRAM_WRITEBACK |
2154 | &dev_attr_backing_dev.attr, | |
a939888e | 2155 | &dev_attr_writeback.attr, |
bb416d18 | 2156 | &dev_attr_writeback_limit.attr, |
1d69a3f8 | 2157 | &dev_attr_writeback_limit_enable.attr, |
013bf95a | 2158 | #endif |
2f6a3bed | 2159 | &dev_attr_io_stat.attr, |
4f2109f6 | 2160 | &dev_attr_mm_stat.attr, |
23eddf39 MK |
2161 | #ifdef CONFIG_ZRAM_WRITEBACK |
2162 | &dev_attr_bd_stat.attr, | |
2163 | #endif | |
623e47fc | 2164 | &dev_attr_debug_stat.attr, |
001d9273 SS |
2165 | #ifdef CONFIG_ZRAM_MULTI_COMP |
2166 | &dev_attr_recomp_algorithm.attr, | |
84b33bf7 | 2167 | &dev_attr_recompress.attr, |
001d9273 | 2168 | #endif |
9b3bb7ab SS |
2169 | NULL, |
2170 | }; | |
2171 | ||
7f0d2672 | 2172 | ATTRIBUTE_GROUPS(zram_disk); |
98af4d4d | 2173 | |
92ff1528 SS |
2174 | /* |
2175 | * Allocate and initialize a new zram device. The function returns
2176 | * a device_id ('>= 0') upon success, and a negative value otherwise.
2177 | */ | |
2178 | static int zram_add(void) | |
306b0c95 | 2179 | { |
4190b3f2 CH |
2180 | struct queue_limits lim = { |
2181 | .logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE, | |
2182 | /* | |
2183 | * To ensure that we always get PAGE_SIZE aligned and | |
2184 | * n*PAGE_SIZE sized I/O requests.
2185 | */ | |
2186 | .physical_block_size = PAGE_SIZE, | |
2187 | .io_min = PAGE_SIZE, | |
2188 | .io_opt = PAGE_SIZE, | |
2189 | .max_hw_discard_sectors = UINT_MAX, | |
2190 | /* | |
2191 | * zram_bio_discard() will clear all logical blocks if logical | |
2192 | * block size is identical to physical block size (PAGE_SIZE).
2193 | * But if it is different, we will skip discarding some parts of | |
2194 | * logical blocks in the part of the request range which isn't | |
2195 | * aligned to physical block size. So we can't ensure that all | |
2196 | * discarded logical blocks are zeroed. | |
2197 | */ | |
2198 | #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE | |
2199 | .max_write_zeroes_sectors = UINT_MAX, | |
2200 | #endif | |
2201 | }; | |
85508ec6 | 2202 | struct zram *zram; |
92ff1528 | 2203 | int ret, device_id; |
85508ec6 SS |
2204 | |
2205 | zram = kzalloc(sizeof(struct zram), GFP_KERNEL); | |
2206 | if (!zram) | |
2207 | return -ENOMEM; | |
2208 | ||
92ff1528 | 2209 | ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL); |
85508ec6 SS |
2210 | if (ret < 0) |
2211 | goto out_free_dev; | |
92ff1528 | 2212 | device_id = ret; |
de1a21a0 | 2213 | |
0900beae | 2214 | init_rwsem(&zram->init_lock); |
1d69a3f8 MK |
2215 | #ifdef CONFIG_ZRAM_WRITEBACK |
2216 | spin_lock_init(&zram->wb_limit_lock); | |
2217 | #endif | |
306b0c95 | 2218 | |
85508ec6 | 2219 | /* gendisk structure */ |
4190b3f2 | 2220 | zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE); |
74fa8f9c | 2221 | if (IS_ERR(zram->disk)) { |
70864969 | 2222 | pr_err("Error allocating disk structure for device %d\n", |
306b0c95 | 2223 | device_id); |
74fa8f9c | 2224 | ret = PTR_ERR(zram->disk); |
7681750b | 2225 | goto out_free_idr; |
306b0c95 NG |
2226 | } |
2227 | ||
f1e3cfff NG |
2228 | zram->disk->major = zram_major; |
2229 | zram->disk->first_minor = device_id; | |
7681750b | 2230 | zram->disk->minors = 1; |
1ebe2e5f | 2231 | zram->disk->flags |= GENHD_FL_NO_PART; |
f1e3cfff | 2232 | zram->disk->fops = &zram_devops; |
f1e3cfff NG |
2233 | zram->disk->private_data = zram; |
2234 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); | |
306b0c95 | 2235 | |
071acb30 | 2236 | /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
f1e3cfff | 2237 | set_capacity(zram->disk, 0); |
b67d1ec1 | 2238 | /* zram devices sort of resemble non-rotational disks */
8b904b5b | 2239 | blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); |
3222d8c2 | 2240 | blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue); |
37887783 | 2241 | blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); |
7f0d2672 | 2242 | ret = device_add_disk(NULL, zram->disk, zram_disk_groups); |
5e2e1cc4 LC |
2243 | if (ret) |
2244 | goto out_cleanup_disk; | |
98af4d4d | 2245 | |
001d9273 | 2246 | comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); |
d12b63c9 | 2247 | |
c0265342 | 2248 | zram_debugfs_register(zram); |
d12b63c9 | 2249 | pr_info("Added device: %s\n", zram->disk->disk_name); |
92ff1528 | 2250 | return device_id; |
de1a21a0 | 2251 | |
5e2e1cc4 | 2252 | out_cleanup_disk: |
8b9ab626 | 2253 | put_disk(zram->disk); |
85508ec6 SS |
2254 | out_free_idr: |
2255 | idr_remove(&zram_index_idr, device_id); | |
2256 | out_free_dev: | |
2257 | kfree(zram); | |
de1a21a0 | 2258 | return ret; |
306b0c95 NG |
2259 | } |
2260 | ||
6566d1a3 | 2261 | static int zram_remove(struct zram *zram) |
306b0c95 | 2262 | { |
8c54499a | 2263 | bool claimed; |
6566d1a3 | 2264 | |
7a86d6dc | 2265 | mutex_lock(&zram->disk->open_mutex); |
dbdc1be3 | 2266 | if (disk_openers(zram->disk)) { |
7a86d6dc | 2267 | mutex_unlock(&zram->disk->open_mutex); |
6566d1a3 SS |
2268 | return -EBUSY; |
2269 | } | |
2270 | ||
8c54499a ML |
2271 | claimed = zram->claim; |
2272 | if (!claimed) | |
2273 | zram->claim = true; | |
7a86d6dc | 2274 | mutex_unlock(&zram->disk->open_mutex); |
6566d1a3 | 2275 | |
c0265342 | 2276 | zram_debugfs_unregister(zram); |
306b0c95 | 2277 | |
8c54499a ML |
2278 | if (claimed) { |
2279 | /* | |
2280 | * If we were claimed by reset_store(), del_gendisk() will | |
2281 | * wait until reset_store() is done, so there is nothing to do.
2282 | */ | |
2283 | ; | |
2284 | } else { | |
2285 | /* Make sure all the pending I/O are finished */ | |
7a86d6dc | 2286 | sync_blockdev(zram->disk->part0); |
8c54499a ML |
2287 | zram_reset_device(zram); |
2288 | } | |
6566d1a3 SS |
2289 | |
2290 | pr_info("Removed device: %s\n", zram->disk->disk_name); | |
2291 | ||
85508ec6 | 2292 | del_gendisk(zram->disk); |
8c54499a ML |
2293 | |
2294 | /* del_gendisk drains pending reset_store */ | |
2295 | WARN_ON_ONCE(claimed && zram->claim); | |
2296 | ||
5a4b6536 ML |
2297 | /* |
2298 | * disksize_store() may be called in between zram_reset_device() | |
2299 | * and del_gendisk(), so run the last reset to avoid leaking | |
2300 | * anything allocated with disksize_store() | |
2301 | */ | |
2302 | zram_reset_device(zram); | |
2303 | ||
8b9ab626 | 2304 | put_disk(zram->disk); |
85508ec6 | 2305 | kfree(zram); |
6566d1a3 SS |
2306 | return 0; |
2307 | } | |
2308 | ||
2309 | /* zram-control sysfs attributes */ | |
27104a53 GKH |
2310 | |
2311 | /* | |
2312 | * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in the
2313 | * sense that reading from this file does alter the state of your system -- it
2314 | * creates a new un-initialized zram device and returns this device's
2315 | * device_id (or an error code if it fails to create a new device).
2316 | */ | |
75a2d422 GKH |
2317 | static ssize_t hot_add_show(const struct class *class, |
2318 | const struct class_attribute *attr, | |
6566d1a3 SS |
2319 | char *buf) |
2320 | { | |
2321 | int ret; | |
2322 | ||
2323 | mutex_lock(&zram_index_mutex); | |
2324 | ret = zram_add(); | |
2325 | mutex_unlock(&zram_index_mutex); | |
2326 | ||
2327 | if (ret < 0) | |
2328 | return ret; | |
2329 | return scnprintf(buf, PAGE_SIZE, "%d\n", ret); | |
2330 | } | |
ca9d081b GKH |
2331 | /* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */ |
2332 | static struct class_attribute class_attr_hot_add = | |
2333 | __ATTR(hot_add, 0400, hot_add_show, NULL); | |
6566d1a3 | 2334 | |
75a2d422 GKH |
2335 | static ssize_t hot_remove_store(const struct class *class, |
2336 | const struct class_attribute *attr, | |
6566d1a3 SS |
2337 | const char *buf, |
2338 | size_t count) | |
2339 | { | |
2340 | struct zram *zram; | |
2341 | int ret, dev_id; | |
2342 | ||
2343 | /* dev_id is gendisk->first_minor, which is `int' */ | |
2344 | ret = kstrtoint(buf, 10, &dev_id); | |
2345 | if (ret) | |
2346 | return ret; | |
2347 | if (dev_id < 0) | |
2348 | return -EINVAL; | |
2349 | ||
2350 | mutex_lock(&zram_index_mutex); | |
2351 | ||
2352 | zram = idr_find(&zram_index_idr, dev_id); | |
17ec4cd9 | 2353 | if (zram) { |
6566d1a3 | 2354 | ret = zram_remove(zram); |
529e71e1 TI |
2355 | if (!ret) |
2356 | idr_remove(&zram_index_idr, dev_id); | |
17ec4cd9 | 2357 | } else { |
6566d1a3 | 2358 | ret = -ENODEV; |
17ec4cd9 | 2359 | } |
6566d1a3 SS |
2360 | |
2361 | mutex_unlock(&zram_index_mutex); | |
2362 | return ret ? ret : count; | |
85508ec6 | 2363 | } |
27104a53 | 2364 | static CLASS_ATTR_WO(hot_remove); |
a096cafc | 2365 | |
27104a53 GKH |
2366 | static struct attribute *zram_control_class_attrs[] = { |
2367 | &class_attr_hot_add.attr, | |
2368 | &class_attr_hot_remove.attr, | |
2369 | NULL, | |
6566d1a3 | 2370 | }; |
27104a53 | 2371 | ATTRIBUTE_GROUPS(zram_control_class); |
6566d1a3 SS |
2372 | |
2373 | static struct class zram_control_class = { | |
2374 | .name = "zram-control", | |
27104a53 | 2375 | .class_groups = zram_control_class_groups, |
6566d1a3 SS |
2376 | }; |
2377 | ||
85508ec6 SS |
2378 | static int zram_remove_cb(int id, void *ptr, void *data) |
2379 | { | |
8c54499a | 2380 | WARN_ON_ONCE(zram_remove(ptr)); |
85508ec6 SS |
2381 | return 0; |
2382 | } | |
a096cafc | 2383 | |
85508ec6 SS |
2384 | static void destroy_devices(void) |
2385 | { | |
6566d1a3 | 2386 | class_unregister(&zram_control_class); |
85508ec6 | 2387 | idr_for_each(&zram_index_idr, &zram_remove_cb, NULL); |
c0265342 | 2388 | zram_debugfs_destroy(); |
85508ec6 | 2389 | idr_destroy(&zram_index_idr); |
a096cafc | 2390 | unregister_blkdev(zram_major, "zram"); |
1dd6c834 | 2391 | cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE); |
306b0c95 NG |
2392 | } |
2393 | ||
f1e3cfff | 2394 | static int __init zram_init(void) |
306b0c95 | 2395 | { |
92ff1528 | 2396 | int ret; |
306b0c95 | 2397 | |
f635725c SS |
2398 | BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG); |
2399 | ||
1dd6c834 AMG |
2400 | ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare", |
2401 | zcomp_cpu_up_prepare, zcomp_cpu_dead); | |
2402 | if (ret < 0) | |
2403 | return ret; | |
2404 | ||
6566d1a3 SS |
2405 | ret = class_register(&zram_control_class); |
2406 | if (ret) { | |
70864969 | 2407 | pr_err("Unable to register zram-control class\n"); |
1dd6c834 | 2408 | cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE); |
6566d1a3 SS |
2409 | return ret; |
2410 | } | |
2411 | ||
c0265342 | 2412 | zram_debugfs_create(); |
f1e3cfff NG |
2413 | zram_major = register_blkdev(0, "zram"); |
2414 | if (zram_major <= 0) { | |
70864969 | 2415 | pr_err("Unable to get major number\n"); |
6566d1a3 | 2416 | class_unregister(&zram_control_class); |
1dd6c834 | 2417 | cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE); |
a096cafc | 2418 | return -EBUSY; |
306b0c95 NG |
2419 | } |
2420 | ||
92ff1528 | 2421 | while (num_devices != 0) { |
6566d1a3 | 2422 | mutex_lock(&zram_index_mutex); |
92ff1528 | 2423 | ret = zram_add(); |
6566d1a3 | 2424 | mutex_unlock(&zram_index_mutex); |
92ff1528 | 2425 | if (ret < 0) |
a096cafc | 2426 | goto out_error; |
92ff1528 | 2427 | num_devices--; |
de1a21a0 NG |
2428 | } |
2429 | ||
306b0c95 | 2430 | return 0; |
de1a21a0 | 2431 | |
a096cafc | 2432 | out_error: |
85508ec6 | 2433 | destroy_devices(); |
306b0c95 NG |
2434 | return ret; |
2435 | } | |
2436 | ||
f1e3cfff | 2437 | static void __exit zram_exit(void) |
306b0c95 | 2438 | { |
85508ec6 | 2439 | destroy_devices(); |
306b0c95 NG |
2440 | } |
2441 | ||
f1e3cfff NG |
2442 | module_init(zram_init); |
2443 | module_exit(zram_exit); | |
306b0c95 | 2444 | |
9b3bb7ab | 2445 | module_param(num_devices, uint, 0); |
c3cdb40e | 2446 | MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices"); |
9b3bb7ab | 2447 | |
306b0c95 NG |
2448 | MODULE_LICENSE("Dual BSD/GPL"); |
2449 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); | |
f1e3cfff | 2450 | MODULE_DESCRIPTION("Compressed RAM Block Device"); |