Commit | Line | Data |
---|---|---|
306b0c95 | 1 | /* |
f1e3cfff | 2 | * Compressed RAM block device |
306b0c95 | 3 | * |
1130ebba | 4 | * Copyright (C) 2008, 2009, 2010 Nitin Gupta |
306b0c95 NG |
5 | * |
6 | * This code is released using a dual license strategy: BSD/GPL | |
7 | * You can choose the licence that better fits your requirements. | |
8 | * | |
9 | * Released under the terms of 3-clause BSD License | |
10 | * Released under the terms of GNU General Public License Version 2.0 | |
11 | * | |
12 | * Project home: http://compcache.googlecode.com | |
13 | */ | |
14 | ||
f1e3cfff | 15 | #define KMSG_COMPONENT "zram" |
306b0c95 NG |
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
17 | ||
18 | #include <linux/module.h> | |
19 | #include <linux/kernel.h> | |
8946a086 | 20 | #include <linux/bio.h> |
306b0c95 NG |
21 | #include <linux/bitops.h> |
22 | #include <linux/blkdev.h> | |
23 | #include <linux/buffer_head.h> | |
24 | #include <linux/device.h> | |
25 | #include <linux/genhd.h> | |
26 | #include <linux/highmem.h> | |
5a0e3ad6 | 27 | #include <linux/slab.h> |
306b0c95 | 28 | #include <linux/lzo.h> |
306b0c95 | 29 | #include <linux/string.h> |
306b0c95 | 30 | #include <linux/vmalloc.h> |
306b0c95 | 31 | |
16a4bfb9 | 32 | #include "zram_drv.h" |
306b0c95 NG |
33 | |
/* Globals */
static int zram_major;

/* Per-device state array; non-static — presumably shared with the zram
 * sysfs code in another translation unit (verify against zram_drv.h). */
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;
40 | ||
41 | static void zram_stat_inc(u32 *v) | |
42 | { | |
43 | *v = *v + 1; | |
44 | } | |
45 | ||
46 | static void zram_stat_dec(u32 *v) | |
47 | { | |
48 | *v = *v - 1; | |
49 | } | |
50 | ||
51 | static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc) | |
52 | { | |
53 | spin_lock(&zram->stat64_lock); | |
54 | *v = *v + inc; | |
55 | spin_unlock(&zram->stat64_lock); | |
56 | } | |
57 | ||
58 | static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec) | |
59 | { | |
60 | spin_lock(&zram->stat64_lock); | |
61 | *v = *v - dec; | |
62 | spin_unlock(&zram->stat64_lock); | |
63 | } | |
64 | ||
65 | static void zram_stat64_inc(struct zram *zram, u64 *v) | |
66 | { | |
67 | zram_stat64_add(zram, v, 1); | |
68 | } | |
306b0c95 | 69 | |
f1e3cfff NG |
70 | static int zram_test_flag(struct zram *zram, u32 index, |
71 | enum zram_pageflags flag) | |
306b0c95 | 72 | { |
f1e3cfff | 73 | return zram->table[index].flags & BIT(flag); |
306b0c95 NG |
74 | } |
75 | ||
f1e3cfff NG |
76 | static void zram_set_flag(struct zram *zram, u32 index, |
77 | enum zram_pageflags flag) | |
306b0c95 | 78 | { |
f1e3cfff | 79 | zram->table[index].flags |= BIT(flag); |
306b0c95 NG |
80 | } |
81 | ||
f1e3cfff NG |
82 | static void zram_clear_flag(struct zram *zram, u32 index, |
83 | enum zram_pageflags flag) | |
306b0c95 | 84 | { |
f1e3cfff | 85 | zram->table[index].flags &= ~BIT(flag); |
306b0c95 NG |
86 | } |
87 | ||
88 | static int page_zero_filled(void *ptr) | |
89 | { | |
90 | unsigned int pos; | |
91 | unsigned long *page; | |
92 | ||
93 | page = (unsigned long *)ptr; | |
94 | ||
95 | for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) { | |
96 | if (page[pos]) | |
97 | return 0; | |
98 | } | |
99 | ||
100 | return 1; | |
101 | } | |
102 | ||
f1e3cfff | 103 | static void zram_set_disksize(struct zram *zram, size_t totalram_bytes) |
306b0c95 | 104 | { |
f1e3cfff | 105 | if (!zram->disksize) { |
306b0c95 NG |
106 | pr_info( |
107 | "disk size not provided. You can use disksize_kb module " | |
108 | "param to specify size.\nUsing default: (%u%% of RAM).\n", | |
109 | default_disksize_perc_ram | |
110 | ); | |
f1e3cfff | 111 | zram->disksize = default_disksize_perc_ram * |
306b0c95 NG |
112 | (totalram_bytes / 100); |
113 | } | |
114 | ||
f1e3cfff | 115 | if (zram->disksize > 2 * (totalram_bytes)) { |
306b0c95 | 116 | pr_info( |
f1e3cfff | 117 | "There is little point creating a zram of greater than " |
306b0c95 | 118 | "twice the size of memory since we expect a 2:1 compression " |
f1e3cfff NG |
119 | "ratio. Note that zram uses about 0.1%% of the size of " |
120 | "the disk when not in use so a huge zram is " | |
306b0c95 NG |
121 | "wasteful.\n" |
122 | "\tMemory Size: %zu kB\n" | |
33863c21 | 123 | "\tSize you selected: %llu kB\n" |
306b0c95 | 124 | "Continuing anyway ...\n", |
f1e3cfff | 125 | totalram_bytes >> 10, zram->disksize |
306b0c95 NG |
126 | ); |
127 | } | |
128 | ||
f1e3cfff | 129 | zram->disksize &= PAGE_MASK; |
306b0c95 NG |
130 | } |
131 | ||
f1e3cfff | 132 | static void zram_free_page(struct zram *zram, size_t index) |
306b0c95 NG |
133 | { |
134 | u32 clen; | |
135 | void *obj; | |
136 | ||
f1e3cfff NG |
137 | struct page *page = zram->table[index].page; |
138 | u32 offset = zram->table[index].offset; | |
306b0c95 NG |
139 | |
140 | if (unlikely(!page)) { | |
2e882281 NG |
141 | /* |
142 | * No memory is allocated for zero filled pages. | |
143 | * Simply clear zero page flag. | |
144 | */ | |
f1e3cfff NG |
145 | if (zram_test_flag(zram, index, ZRAM_ZERO)) { |
146 | zram_clear_flag(zram, index, ZRAM_ZERO); | |
147 | zram_stat_dec(&zram->stats.pages_zero); | |
306b0c95 NG |
148 | } |
149 | return; | |
150 | } | |
151 | ||
f1e3cfff | 152 | if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { |
306b0c95 NG |
153 | clen = PAGE_SIZE; |
154 | __free_page(page); | |
f1e3cfff NG |
155 | zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED); |
156 | zram_stat_dec(&zram->stats.pages_expand); | |
306b0c95 NG |
157 | goto out; |
158 | } | |
159 | ||
160 | obj = kmap_atomic(page, KM_USER0) + offset; | |
161 | clen = xv_get_object_size(obj) - sizeof(struct zobj_header); | |
162 | kunmap_atomic(obj, KM_USER0); | |
163 | ||
f1e3cfff | 164 | xv_free(zram->mem_pool, page, offset); |
306b0c95 | 165 | if (clen <= PAGE_SIZE / 2) |
f1e3cfff | 166 | zram_stat_dec(&zram->stats.good_compress); |
306b0c95 NG |
167 | |
168 | out: | |
33863c21 | 169 | zram_stat64_sub(zram, &zram->stats.compr_size, clen); |
f1e3cfff | 170 | zram_stat_dec(&zram->stats.pages_stored); |
306b0c95 | 171 | |
f1e3cfff NG |
172 | zram->table[index].page = NULL; |
173 | zram->table[index].offset = 0; | |
306b0c95 NG |
174 | } |
175 | ||
a1dd52af | 176 | static void handle_zero_page(struct page *page) |
306b0c95 NG |
177 | { |
178 | void *user_mem; | |
306b0c95 NG |
179 | |
180 | user_mem = kmap_atomic(page, KM_USER0); | |
181 | memset(user_mem, 0, PAGE_SIZE); | |
182 | kunmap_atomic(user_mem, KM_USER0); | |
183 | ||
30fb8a71 | 184 | flush_dcache_page(page); |
306b0c95 NG |
185 | } |
186 | ||
f1e3cfff | 187 | static void handle_uncompressed_page(struct zram *zram, |
a1dd52af | 188 | struct page *page, u32 index) |
306b0c95 | 189 | { |
306b0c95 NG |
190 | unsigned char *user_mem, *cmem; |
191 | ||
306b0c95 | 192 | user_mem = kmap_atomic(page, KM_USER0); |
f1e3cfff NG |
193 | cmem = kmap_atomic(zram->table[index].page, KM_USER1) + |
194 | zram->table[index].offset; | |
306b0c95 NG |
195 | |
196 | memcpy(user_mem, cmem, PAGE_SIZE); | |
197 | kunmap_atomic(user_mem, KM_USER0); | |
198 | kunmap_atomic(cmem, KM_USER1); | |
199 | ||
30fb8a71 | 200 | flush_dcache_page(page); |
306b0c95 NG |
201 | } |
202 | ||
f1e3cfff | 203 | static int zram_read(struct zram *zram, struct bio *bio) |
306b0c95 | 204 | { |
a1dd52af NG |
205 | |
206 | int i; | |
306b0c95 | 207 | u32 index; |
a1dd52af | 208 | struct bio_vec *bvec; |
306b0c95 | 209 | |
484875ad NG |
210 | if (unlikely(!zram->init_done)) { |
211 | set_bit(BIO_UPTODATE, &bio->bi_flags); | |
212 | bio_endio(bio, 0); | |
213 | return 0; | |
214 | } | |
306b0c95 | 215 | |
484875ad | 216 | zram_stat64_inc(zram, &zram->stats.num_reads); |
306b0c95 | 217 | index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; |
484875ad | 218 | |
a1dd52af NG |
219 | bio_for_each_segment(bvec, bio, i) { |
220 | int ret; | |
221 | size_t clen; | |
222 | struct page *page; | |
223 | struct zobj_header *zheader; | |
224 | unsigned char *user_mem, *cmem; | |
306b0c95 | 225 | |
a1dd52af | 226 | page = bvec->bv_page; |
306b0c95 | 227 | |
f1e3cfff | 228 | if (zram_test_flag(zram, index, ZRAM_ZERO)) { |
a1dd52af NG |
229 | handle_zero_page(page); |
230 | continue; | |
231 | } | |
306b0c95 | 232 | |
a1dd52af | 233 | /* Requested page is not present in compressed area */ |
f1e3cfff | 234 | if (unlikely(!zram->table[index].page)) { |
a1dd52af NG |
235 | pr_debug("Read before write: sector=%lu, size=%u", |
236 | (ulong)(bio->bi_sector), bio->bi_size); | |
237 | /* Do nothing */ | |
238 | continue; | |
239 | } | |
306b0c95 | 240 | |
a1dd52af | 241 | /* Page is stored uncompressed since it's incompressible */ |
f1e3cfff NG |
242 | if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { |
243 | handle_uncompressed_page(zram, page, index); | |
a1dd52af NG |
244 | continue; |
245 | } | |
306b0c95 | 246 | |
a1dd52af NG |
247 | user_mem = kmap_atomic(page, KM_USER0); |
248 | clen = PAGE_SIZE; | |
306b0c95 | 249 | |
f1e3cfff NG |
250 | cmem = kmap_atomic(zram->table[index].page, KM_USER1) + |
251 | zram->table[index].offset; | |
306b0c95 | 252 | |
a1dd52af NG |
253 | ret = lzo1x_decompress_safe( |
254 | cmem + sizeof(*zheader), | |
255 | xv_get_object_size(cmem) - sizeof(*zheader), | |
256 | user_mem, &clen); | |
306b0c95 | 257 | |
a1dd52af NG |
258 | kunmap_atomic(user_mem, KM_USER0); |
259 | kunmap_atomic(cmem, KM_USER1); | |
306b0c95 | 260 | |
a1dd52af NG |
261 | /* Should NEVER happen. Return bio error if it does. */ |
262 | if (unlikely(ret != LZO_E_OK)) { | |
263 | pr_err("Decompression failed! err=%d, page=%u\n", | |
264 | ret, index); | |
f1e3cfff | 265 | zram_stat64_inc(zram, &zram->stats.failed_reads); |
a1dd52af NG |
266 | goto out; |
267 | } | |
268 | ||
269 | flush_dcache_page(page); | |
270 | index++; | |
271 | } | |
306b0c95 NG |
272 | |
273 | set_bit(BIO_UPTODATE, &bio->bi_flags); | |
274 | bio_endio(bio, 0); | |
275 | return 0; | |
276 | ||
277 | out: | |
278 | bio_io_error(bio); | |
279 | return 0; | |
280 | } | |
281 | ||
f1e3cfff | 282 | static int zram_write(struct zram *zram, struct bio *bio) |
306b0c95 | 283 | { |
484875ad | 284 | int i, ret; |
a1dd52af NG |
285 | u32 index; |
286 | struct bio_vec *bvec; | |
306b0c95 | 287 | |
484875ad NG |
288 | if (unlikely(!zram->init_done)) { |
289 | ret = zram_init_device(zram); | |
290 | if (ret) | |
291 | goto out; | |
292 | } | |
306b0c95 | 293 | |
484875ad | 294 | zram_stat64_inc(zram, &zram->stats.num_writes); |
306b0c95 NG |
295 | index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; |
296 | ||
a1dd52af | 297 | bio_for_each_segment(bvec, bio, i) { |
a1dd52af NG |
298 | u32 offset; |
299 | size_t clen; | |
300 | struct zobj_header *zheader; | |
301 | struct page *page, *page_store; | |
302 | unsigned char *user_mem, *cmem, *src; | |
306b0c95 | 303 | |
a1dd52af | 304 | page = bvec->bv_page; |
f1e3cfff | 305 | src = zram->compress_buffer; |
306b0c95 | 306 | |
a1dd52af NG |
307 | /* |
308 | * System overwrites unused sectors. Free memory associated | |
309 | * with this sector now. | |
310 | */ | |
f1e3cfff NG |
311 | if (zram->table[index].page || |
312 | zram_test_flag(zram, index, ZRAM_ZERO)) | |
313 | zram_free_page(zram, index); | |
306b0c95 | 314 | |
f1e3cfff | 315 | mutex_lock(&zram->lock); |
306b0c95 | 316 | |
a1dd52af NG |
317 | user_mem = kmap_atomic(page, KM_USER0); |
318 | if (page_zero_filled(user_mem)) { | |
319 | kunmap_atomic(user_mem, KM_USER0); | |
f1e3cfff NG |
320 | mutex_unlock(&zram->lock); |
321 | zram_stat_inc(&zram->stats.pages_zero); | |
322 | zram_set_flag(zram, index, ZRAM_ZERO); | |
a1dd52af NG |
323 | continue; |
324 | } | |
306b0c95 | 325 | |
a1dd52af | 326 | ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen, |
f1e3cfff | 327 | zram->compress_workmem); |
306b0c95 | 328 | |
a1dd52af | 329 | kunmap_atomic(user_mem, KM_USER0); |
306b0c95 | 330 | |
a1dd52af | 331 | if (unlikely(ret != LZO_E_OK)) { |
f1e3cfff | 332 | mutex_unlock(&zram->lock); |
a1dd52af | 333 | pr_err("Compression failed! err=%d\n", ret); |
f1e3cfff | 334 | zram_stat64_inc(zram, &zram->stats.failed_writes); |
306b0c95 NG |
335 | goto out; |
336 | } | |
337 | ||
a1dd52af NG |
338 | /* |
339 | * Page is incompressible. Store it as-is (uncompressed) | |
f1e3cfff | 340 | * since we do not want to return too many disk write |
a1dd52af NG |
341 | * errors which has side effect of hanging the system. |
342 | */ | |
343 | if (unlikely(clen > max_zpage_size)) { | |
344 | clen = PAGE_SIZE; | |
345 | page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM); | |
346 | if (unlikely(!page_store)) { | |
f1e3cfff | 347 | mutex_unlock(&zram->lock); |
a1dd52af NG |
348 | pr_info("Error allocating memory for " |
349 | "incompressible page: %u\n", index); | |
f1e3cfff NG |
350 | zram_stat64_inc(zram, |
351 | &zram->stats.failed_writes); | |
a1dd52af NG |
352 | goto out; |
353 | } | |
354 | ||
355 | offset = 0; | |
f1e3cfff NG |
356 | zram_set_flag(zram, index, ZRAM_UNCOMPRESSED); |
357 | zram_stat_inc(&zram->stats.pages_expand); | |
358 | zram->table[index].page = page_store; | |
a1dd52af NG |
359 | src = kmap_atomic(page, KM_USER0); |
360 | goto memstore; | |
361 | } | |
306b0c95 | 362 | |
f1e3cfff NG |
363 | if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader), |
364 | &zram->table[index].page, &offset, | |
a1dd52af | 365 | GFP_NOIO | __GFP_HIGHMEM)) { |
f1e3cfff | 366 | mutex_unlock(&zram->lock); |
a1dd52af NG |
367 | pr_info("Error allocating memory for compressed " |
368 | "page: %u, size=%zu\n", index, clen); | |
f1e3cfff | 369 | zram_stat64_inc(zram, &zram->stats.failed_writes); |
a1dd52af NG |
370 | goto out; |
371 | } | |
306b0c95 NG |
372 | |
373 | memstore: | |
f1e3cfff | 374 | zram->table[index].offset = offset; |
306b0c95 | 375 | |
f1e3cfff NG |
376 | cmem = kmap_atomic(zram->table[index].page, KM_USER1) + |
377 | zram->table[index].offset; | |
306b0c95 NG |
378 | |
379 | #if 0 | |
a1dd52af | 380 | /* Back-reference needed for memory defragmentation */ |
f1e3cfff | 381 | if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) { |
a1dd52af NG |
382 | zheader = (struct zobj_header *)cmem; |
383 | zheader->table_idx = index; | |
384 | cmem += sizeof(*zheader); | |
385 | } | |
306b0c95 NG |
386 | #endif |
387 | ||
a1dd52af | 388 | memcpy(cmem, src, clen); |
306b0c95 | 389 | |
a1dd52af | 390 | kunmap_atomic(cmem, KM_USER1); |
f1e3cfff | 391 | if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) |
a1dd52af | 392 | kunmap_atomic(src, KM_USER0); |
306b0c95 | 393 | |
a1dd52af | 394 | /* Update stats */ |
33863c21 | 395 | zram_stat64_add(zram, &zram->stats.compr_size, clen); |
f1e3cfff | 396 | zram_stat_inc(&zram->stats.pages_stored); |
a1dd52af | 397 | if (clen <= PAGE_SIZE / 2) |
f1e3cfff | 398 | zram_stat_inc(&zram->stats.good_compress); |
306b0c95 | 399 | |
f1e3cfff | 400 | mutex_unlock(&zram->lock); |
a1dd52af NG |
401 | index++; |
402 | } | |
306b0c95 NG |
403 | |
404 | set_bit(BIO_UPTODATE, &bio->bi_flags); | |
405 | bio_endio(bio, 0); | |
406 | return 0; | |
407 | ||
408 | out: | |
306b0c95 NG |
409 | bio_io_error(bio); |
410 | return 0; | |
411 | } | |
412 | ||
306b0c95 NG |
413 | /* |
414 | * Check if request is within bounds and page aligned. | |
415 | */ | |
f1e3cfff | 416 | static inline int valid_io_request(struct zram *zram, struct bio *bio) |
306b0c95 NG |
417 | { |
418 | if (unlikely( | |
f1e3cfff | 419 | (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) || |
306b0c95 | 420 | (bio->bi_sector & (SECTORS_PER_PAGE - 1)) || |
a1dd52af | 421 | (bio->bi_size & (PAGE_SIZE - 1)))) { |
306b0c95 NG |
422 | |
423 | return 0; | |
424 | } | |
425 | ||
a1dd52af | 426 | /* I/O request is valid */ |
306b0c95 NG |
427 | return 1; |
428 | } | |
429 | ||
430 | /* | |
f1e3cfff | 431 | * Handler function for all zram I/O requests. |
306b0c95 | 432 | */ |
f1e3cfff | 433 | static int zram_make_request(struct request_queue *queue, struct bio *bio) |
306b0c95 NG |
434 | { |
435 | int ret = 0; | |
f1e3cfff | 436 | struct zram *zram = queue->queuedata; |
306b0c95 | 437 | |
f1e3cfff | 438 | if (unlikely(!zram->init_done)) { |
7e24cce3 AB |
439 | set_bit(BIO_UPTODATE, &bio->bi_flags); |
440 | bio_endio(bio, 0); | |
306b0c95 NG |
441 | return 0; |
442 | } | |
443 | ||
f1e3cfff NG |
444 | if (!valid_io_request(zram, bio)) { |
445 | zram_stat64_inc(zram, &zram->stats.invalid_io); | |
306b0c95 NG |
446 | bio_io_error(bio); |
447 | return 0; | |
448 | } | |
449 | ||
450 | switch (bio_data_dir(bio)) { | |
451 | case READ: | |
f1e3cfff | 452 | ret = zram_read(zram, bio); |
306b0c95 NG |
453 | break; |
454 | ||
455 | case WRITE: | |
f1e3cfff | 456 | ret = zram_write(zram, bio); |
306b0c95 NG |
457 | break; |
458 | } | |
459 | ||
460 | return ret; | |
461 | } | |
462 | ||
33863c21 | 463 | void zram_reset_device(struct zram *zram) |
306b0c95 | 464 | { |
97a06382 | 465 | size_t index; |
306b0c95 | 466 | |
484875ad | 467 | mutex_lock(&zram->init_lock); |
f1e3cfff | 468 | zram->init_done = 0; |
7eef7533 | 469 | |
306b0c95 | 470 | /* Free various per-device buffers */ |
f1e3cfff NG |
471 | kfree(zram->compress_workmem); |
472 | free_pages((unsigned long)zram->compress_buffer, 1); | |
306b0c95 | 473 | |
f1e3cfff NG |
474 | zram->compress_workmem = NULL; |
475 | zram->compress_buffer = NULL; | |
306b0c95 | 476 | |
f1e3cfff NG |
477 | /* Free all pages that are still in this zram device */ |
478 | for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) { | |
306b0c95 NG |
479 | struct page *page; |
480 | u16 offset; | |
481 | ||
f1e3cfff NG |
482 | page = zram->table[index].page; |
483 | offset = zram->table[index].offset; | |
306b0c95 NG |
484 | |
485 | if (!page) | |
486 | continue; | |
487 | ||
f1e3cfff | 488 | if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) |
306b0c95 NG |
489 | __free_page(page); |
490 | else | |
f1e3cfff | 491 | xv_free(zram->mem_pool, page, offset); |
306b0c95 NG |
492 | } |
493 | ||
f1e3cfff NG |
494 | vfree(zram->table); |
495 | zram->table = NULL; | |
306b0c95 | 496 | |
f1e3cfff NG |
497 | xv_destroy_pool(zram->mem_pool); |
498 | zram->mem_pool = NULL; | |
306b0c95 | 499 | |
306b0c95 | 500 | /* Reset stats */ |
f1e3cfff | 501 | memset(&zram->stats, 0, sizeof(zram->stats)); |
306b0c95 | 502 | |
f1e3cfff | 503 | zram->disksize = 0; |
484875ad | 504 | mutex_unlock(&zram->init_lock); |
306b0c95 NG |
505 | } |
506 | ||
33863c21 | 507 | int zram_init_device(struct zram *zram) |
306b0c95 NG |
508 | { |
509 | int ret; | |
510 | size_t num_pages; | |
306b0c95 | 511 | |
484875ad NG |
512 | mutex_lock(&zram->init_lock); |
513 | ||
f1e3cfff | 514 | if (zram->init_done) { |
484875ad NG |
515 | mutex_unlock(&zram->init_lock); |
516 | return 0; | |
306b0c95 NG |
517 | } |
518 | ||
f1e3cfff | 519 | zram_set_disksize(zram, totalram_pages << PAGE_SHIFT); |
306b0c95 | 520 | |
f1e3cfff NG |
521 | zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); |
522 | if (!zram->compress_workmem) { | |
306b0c95 NG |
523 | pr_err("Error allocating compressor working memory!\n"); |
524 | ret = -ENOMEM; | |
525 | goto fail; | |
526 | } | |
527 | ||
f1e3cfff NG |
528 | zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1); |
529 | if (!zram->compress_buffer) { | |
306b0c95 NG |
530 | pr_err("Error allocating compressor buffer space\n"); |
531 | ret = -ENOMEM; | |
532 | goto fail; | |
533 | } | |
534 | ||
f1e3cfff NG |
535 | num_pages = zram->disksize >> PAGE_SHIFT; |
536 | zram->table = vmalloc(num_pages * sizeof(*zram->table)); | |
537 | if (!zram->table) { | |
538 | pr_err("Error allocating zram address table\n"); | |
306b0c95 | 539 | /* To prevent accessing table entries during cleanup */ |
f1e3cfff | 540 | zram->disksize = 0; |
306b0c95 NG |
541 | ret = -ENOMEM; |
542 | goto fail; | |
543 | } | |
f1e3cfff | 544 | memset(zram->table, 0, num_pages * sizeof(*zram->table)); |
306b0c95 | 545 | |
f1e3cfff | 546 | set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); |
306b0c95 | 547 | |
f1e3cfff NG |
548 | /* zram devices sort of resembles non-rotational disks */ |
549 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); | |
306b0c95 | 550 | |
f1e3cfff NG |
551 | zram->mem_pool = xv_create_pool(); |
552 | if (!zram->mem_pool) { | |
306b0c95 NG |
553 | pr_err("Error creating memory pool\n"); |
554 | ret = -ENOMEM; | |
555 | goto fail; | |
556 | } | |
557 | ||
f1e3cfff | 558 | zram->init_done = 1; |
484875ad | 559 | mutex_unlock(&zram->init_lock); |
306b0c95 NG |
560 | |
561 | pr_debug("Initialization done!\n"); | |
562 | return 0; | |
563 | ||
564 | fail: | |
484875ad | 565 | mutex_unlock(&zram->init_lock); |
33863c21 | 566 | zram_reset_device(zram); |
306b0c95 NG |
567 | |
568 | pr_err("Initialization failed: err=%d\n", ret); | |
569 | return ret; | |
570 | } | |
571 | ||
f1e3cfff | 572 | void zram_slot_free_notify(struct block_device *bdev, unsigned long index) |
107c161b | 573 | { |
f1e3cfff | 574 | struct zram *zram; |
107c161b | 575 | |
f1e3cfff NG |
576 | zram = bdev->bd_disk->private_data; |
577 | zram_free_page(zram, index); | |
578 | zram_stat64_inc(zram, &zram->stats.notify_free); | |
107c161b NG |
579 | } |
580 | ||
f1e3cfff | 581 | static const struct block_device_operations zram_devops = { |
f1e3cfff | 582 | .swap_slot_free_notify = zram_slot_free_notify, |
107c161b | 583 | .owner = THIS_MODULE |
306b0c95 NG |
584 | }; |
585 | ||
f1e3cfff | 586 | static int create_device(struct zram *zram, int device_id) |
306b0c95 | 587 | { |
de1a21a0 NG |
588 | int ret = 0; |
589 | ||
f1e3cfff | 590 | mutex_init(&zram->lock); |
484875ad | 591 | mutex_init(&zram->init_lock); |
f1e3cfff | 592 | spin_lock_init(&zram->stat64_lock); |
306b0c95 | 593 | |
f1e3cfff NG |
594 | zram->queue = blk_alloc_queue(GFP_KERNEL); |
595 | if (!zram->queue) { | |
306b0c95 NG |
596 | pr_err("Error allocating disk queue for device %d\n", |
597 | device_id); | |
de1a21a0 NG |
598 | ret = -ENOMEM; |
599 | goto out; | |
306b0c95 NG |
600 | } |
601 | ||
f1e3cfff NG |
602 | blk_queue_make_request(zram->queue, zram_make_request); |
603 | zram->queue->queuedata = zram; | |
306b0c95 NG |
604 | |
605 | /* gendisk structure */ | |
f1e3cfff NG |
606 | zram->disk = alloc_disk(1); |
607 | if (!zram->disk) { | |
608 | blk_cleanup_queue(zram->queue); | |
306b0c95 NG |
609 | pr_warning("Error allocating disk structure for device %d\n", |
610 | device_id); | |
de1a21a0 NG |
611 | ret = -ENOMEM; |
612 | goto out; | |
306b0c95 NG |
613 | } |
614 | ||
f1e3cfff NG |
615 | zram->disk->major = zram_major; |
616 | zram->disk->first_minor = device_id; | |
617 | zram->disk->fops = &zram_devops; | |
618 | zram->disk->queue = zram->queue; | |
619 | zram->disk->private_data = zram; | |
620 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); | |
306b0c95 | 621 | |
33863c21 | 622 | /* Actual capacity set using syfs (/sys/block/zram<id>/disksize */ |
f1e3cfff | 623 | set_capacity(zram->disk, 0); |
5d83d5a0 | 624 | |
a1dd52af NG |
625 | /* |
626 | * To ensure that we always get PAGE_SIZE aligned | |
627 | * and n*PAGE_SIZED sized I/O requests. | |
628 | */ | |
f1e3cfff NG |
629 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
630 | blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE); | |
631 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); | |
632 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); | |
5d83d5a0 | 633 | |
f1e3cfff | 634 | add_disk(zram->disk); |
306b0c95 | 635 | |
33863c21 NG |
636 | #ifdef CONFIG_SYSFS |
637 | ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, | |
638 | &zram_disk_attr_group); | |
639 | if (ret < 0) { | |
640 | pr_warning("Error creating sysfs group"); | |
641 | goto out; | |
642 | } | |
643 | #endif | |
644 | ||
f1e3cfff | 645 | zram->init_done = 0; |
de1a21a0 NG |
646 | |
647 | out: | |
648 | return ret; | |
306b0c95 NG |
649 | } |
650 | ||
f1e3cfff | 651 | static void destroy_device(struct zram *zram) |
306b0c95 | 652 | { |
33863c21 NG |
653 | #ifdef CONFIG_SYSFS |
654 | sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, | |
655 | &zram_disk_attr_group); | |
656 | #endif | |
657 | ||
f1e3cfff NG |
658 | if (zram->disk) { |
659 | del_gendisk(zram->disk); | |
660 | put_disk(zram->disk); | |
306b0c95 NG |
661 | } |
662 | ||
f1e3cfff NG |
663 | if (zram->queue) |
664 | blk_cleanup_queue(zram->queue); | |
306b0c95 NG |
665 | } |
666 | ||
f1e3cfff | 667 | static int __init zram_init(void) |
306b0c95 | 668 | { |
de1a21a0 | 669 | int ret, dev_id; |
306b0c95 NG |
670 | |
671 | if (num_devices > max_num_devices) { | |
672 | pr_warning("Invalid value for num_devices: %u\n", | |
673 | num_devices); | |
de1a21a0 NG |
674 | ret = -EINVAL; |
675 | goto out; | |
306b0c95 NG |
676 | } |
677 | ||
f1e3cfff NG |
678 | zram_major = register_blkdev(0, "zram"); |
679 | if (zram_major <= 0) { | |
306b0c95 | 680 | pr_warning("Unable to get major number\n"); |
de1a21a0 NG |
681 | ret = -EBUSY; |
682 | goto out; | |
306b0c95 NG |
683 | } |
684 | ||
685 | if (!num_devices) { | |
686 | pr_info("num_devices not specified. Using default: 1\n"); | |
687 | num_devices = 1; | |
688 | } | |
689 | ||
690 | /* Allocate the device array and initialize each one */ | |
691 | pr_info("Creating %u devices ...\n", num_devices); | |
f1e3cfff | 692 | devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL); |
de1a21a0 NG |
693 | if (!devices) { |
694 | ret = -ENOMEM; | |
695 | goto unregister; | |
696 | } | |
306b0c95 | 697 | |
de1a21a0 NG |
698 | for (dev_id = 0; dev_id < num_devices; dev_id++) { |
699 | ret = create_device(&devices[dev_id], dev_id); | |
700 | if (ret) | |
3bf040c7 | 701 | goto free_devices; |
de1a21a0 NG |
702 | } |
703 | ||
306b0c95 | 704 | return 0; |
de1a21a0 | 705 | |
3bf040c7 | 706 | free_devices: |
de1a21a0 NG |
707 | while (dev_id) |
708 | destroy_device(&devices[--dev_id]); | |
273ad8dc | 709 | kfree(devices); |
de1a21a0 | 710 | unregister: |
f1e3cfff | 711 | unregister_blkdev(zram_major, "zram"); |
de1a21a0 | 712 | out: |
306b0c95 NG |
713 | return ret; |
714 | } | |
715 | ||
f1e3cfff | 716 | static void __exit zram_exit(void) |
306b0c95 NG |
717 | { |
718 | int i; | |
f1e3cfff | 719 | struct zram *zram; |
306b0c95 NG |
720 | |
721 | for (i = 0; i < num_devices; i++) { | |
f1e3cfff | 722 | zram = &devices[i]; |
306b0c95 | 723 | |
f1e3cfff NG |
724 | destroy_device(zram); |
725 | if (zram->init_done) | |
33863c21 | 726 | zram_reset_device(zram); |
306b0c95 NG |
727 | } |
728 | ||
f1e3cfff | 729 | unregister_blkdev(zram_major, "zram"); |
306b0c95 NG |
730 | |
731 | kfree(devices); | |
732 | pr_debug("Cleanup done!\n"); | |
733 | } | |
734 | ||
735 | module_param(num_devices, uint, 0); | |
f1e3cfff | 736 | MODULE_PARM_DESC(num_devices, "Number of zram devices"); |
306b0c95 | 737 | |
f1e3cfff NG |
738 | module_init(zram_init); |
739 | module_exit(zram_exit); | |
306b0c95 NG |
740 | |
741 | MODULE_LICENSE("Dual BSD/GPL"); | |
742 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); | |
f1e3cfff | 743 | MODULE_DESCRIPTION("Compressed RAM Block Device"); |