/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

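/*
 * Per-page flag helpers. Each logical PAGE_SIZE-sized page of the zram
 * device has a zram->table[] entry; its ->flags word carries per-page
 * state bits (enum zram_pageflags, declared in zram_drv.h, e.g. ZRAM_ZERO
 * and ZRAM_UNCOMPRESSED) that are tested, set and cleared via BIT(flag).
 */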
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

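/*
 * Example: with 4 KiB pages and an 8-byte unsigned long,
 * page_zero_filled() above scans 4096 / 8 = 512 words and bails out on
 * the first non-zero word, so a zero-filled page costs one full pass
 * while typical non-zero data is rejected almost immediately.
 */
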
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %zu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}

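/*
 * Worked example for zram_set_disksize(): if no disksize was set and the
 * system has 1 GiB of RAM, the default becomes
 * default_disksize_perc_ram * (1 GiB / 100); with a hypothetical default
 * of 25%% that is roughly 256 MiB. The actual percentage is a constant
 * defined in the zram headers, not in this file. The final size is always
 * rounded down to a PAGE_SIZE multiple via PAGE_MASK.
 */
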
static void zram_ioctl_get_stats(struct zram *zram,
			struct zram_ioctl_stats *s)
{
	s->disksize = zram->disksize;

#if defined(CONFIG_ZRAM_STATS)
	{
	struct zram_stats *rs = &zram->stats;
	size_t succ_writes, mem_used;
	unsigned int good_compress_perc = 0, no_compress_perc = 0;

	mem_used = xv_get_total_size_bytes(zram->mem_pool)
			+ (rs->pages_expand << PAGE_SHIFT);
	succ_writes = zram_stat64_read(zram, &rs->num_writes) -
			zram_stat64_read(zram, &rs->failed_writes);

	if (succ_writes && rs->pages_stored) {
		good_compress_perc = rs->good_compress * 100
					/ rs->pages_stored;
		no_compress_perc = rs->pages_expand * 100
					/ rs->pages_stored;
	}

	s->num_reads = zram_stat64_read(zram, &rs->num_reads);
	s->num_writes = zram_stat64_read(zram, &rs->num_writes);
	s->failed_reads = zram_stat64_read(zram, &rs->failed_reads);
	s->failed_writes = zram_stat64_read(zram, &rs->failed_writes);
	s->invalid_io = zram_stat64_read(zram, &rs->invalid_io);
	s->notify_free = zram_stat64_read(zram, &rs->notify_free);
	s->pages_zero = rs->pages_zero;

	s->good_compress_pct = good_compress_perc;
	s->pages_expand_pct = no_compress_perc;

	s->pages_stored = rs->pages_stored;
	s->pages_used = mem_used >> PAGE_SHIFT;
	s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
	s->compr_data_size = rs->compr_size;
	s->mem_used_total = mem_used;
	}
#endif /* CONFIG_ZRAM_STATS */
}

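/*
 * Release whatever backs table entry @index. Three cases are handled
 * below: zero-filled pages (no memory allocated, only the ZRAM_ZERO flag
 * is cleared), incompressible pages stored as whole pages (freed with
 * __free_page()), and regular compressed objects (returned to the
 * xvmalloc pool). Stats are adjusted to match.
 */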
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram->stats.compr_size -= clen;
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}

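/*
 * A bio's starting sector is converted to a page index with
 * index = bi_sector >> SECTORS_PER_PAGE_SHIFT. As an illustration,
 * assuming 512-byte sectors and 4 KiB pages, SECTORS_PER_PAGE_SHIFT is 3,
 * so sector 24 maps to logical page 3 of the zram device. The queue
 * limits set in create_device() guarantee such page-aligned requests.
 */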
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	zram_stat64_inc(zram, &zram->stats.num_reads);

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}

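/*
 * Write path, per bio segment: free any old data at this index, store a
 * flag instead of memory for zero-filled pages, otherwise compress with
 * LZO. If the result is still larger than max_zpage_size, the page is
 * kept uncompressed in a freshly allocated page (ZRAM_UNCOMPRESSED);
 * otherwise the compressed object is placed in the xvmalloc pool and
 * copied in at the memstore label.
 */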
static int zram_write(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	zram_stat64_inc(zram, &zram->stats.num_writes);

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors, which would have the side effect of hanging
		 * the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram->stats.compr_size += clen;
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {
		return 0;
	}

	/* I/O request is valid */
	return 1;
}

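/*
 * Example of the check above, assuming 512-byte sectors and 4 KiB pages:
 * a bio starting at sector 16 with bi_size 8192 is accepted (start and
 * size are page aligned and within disksize), while one starting at
 * sector 3, or sized 512 bytes, is rejected as invalid I/O.
 */
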
/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	int ret = 0;
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done)) {
		bio_io_error(bio);
		return 0;
	}

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		ret = zram_read(zram, bio);
		break;

	case WRITE:
		ret = zram_write(zram, bio);
		break;
	}

	return ret;
}

static void reset_device(struct zram *zram)
{
	size_t index;

	/* Do not accept any new I/O request */
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}

static int zram_ioctl_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	if (zram->init_done) {
		pr_info("Device already initialized!\n");
		return -EBUSY;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vmalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}
	memset(zram->table, 0, num_pages * sizeof(*zram->table));

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;

	pr_debug("Initialization done!\n");
	return 0;

fail:
	reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

static int zram_ioctl_reset_device(struct zram *zram)
{
	if (zram->init_done)
		reset_device(zram);

	return 0;
}

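/*
 * Rough userspace sequence for the ioctl interface below (illustrative
 * only; the ZRAMIO_* numbers and struct zram_ioctl_stats layout live in
 * the zram ioctl header, which is not part of this file):
 *
 *	int fd = open("/dev/zram0", O_RDWR);
 *	size_t disksize_kb = 65536;		(i.e. 64 MiB)
 *	ioctl(fd, ZRAMIO_SET_DISKSIZE_KB, &disksize_kb);
 *	ioctl(fd, ZRAMIO_INIT);
 *	... use the device, e.g. mkswap/swapon or mkfs ...
 *	ioctl(fd, ZRAMIO_RESET);
 */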
static int zram_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	size_t disksize_kb;

	struct zram *zram = bdev->bd_disk->private_data;

	switch (cmd) {
	case ZRAMIO_SET_DISKSIZE_KB:
		if (zram->init_done) {
			ret = -EBUSY;
			goto out;
		}
		if (copy_from_user(&disksize_kb, (void *)arg,
						_IOC_SIZE(cmd))) {
			ret = -EFAULT;
			goto out;
		}
		zram->disksize = disksize_kb << 10;
		pr_info("Disk size set to %zu kB\n", disksize_kb);
		break;

	case ZRAMIO_GET_STATS:
	{
		struct zram_ioctl_stats *stats;
		if (!zram->init_done) {
			ret = -ENOTTY;
			goto out;
		}
		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
		if (!stats) {
			ret = -ENOMEM;
			goto out;
		}
		zram_ioctl_get_stats(zram, stats);
		if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
			kfree(stats);
			ret = -EFAULT;
			goto out;
		}
		kfree(stats);
		break;
	}
	case ZRAMIO_INIT:
		ret = zram_ioctl_init_device(zram);
		break;

	case ZRAMIO_RESET:
		/* Do not reset an active device! */
		if (bdev->bd_holders) {
			ret = -EBUSY;
			goto out;
		}

		/* Make sure all pending I/O is finished */
		if (bdev)
			fsync_bdev(bdev);

		ret = zram_ioctl_reset_device(zram);
		break;

	default:
		pr_info("Invalid ioctl %u\n", cmd);
		ret = -ENOTTY;
	}

out:
	return ret;
}

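/*
 * Called by the block layer when zram is used as a swap device and a swap
 * slot is freed (wired up via zram_devops.swap_slot_free_notify below).
 * Freeing the page immediately releases its compressed memory instead of
 * waiting for the slot to be overwritten.
 */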
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.ioctl = zram_ioctl,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

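/*
 * Per-device setup: each struct zram gets its own request queue with
 * zram_make_request() as the bio handler and a gendisk named zram<id>,
 * so the first device typically appears as /dev/zram0. Capacity starts
 * at 0 and is only set once ZRAMIO_SET_DISKSIZE_KB/ZRAMIO_INIT have run.
 */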
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using ZRAMIO_SET_DISKSIZE_KB ioctl */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

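/*
 * Module init: register the "zram" block major, then create num_devices
 * devices (default 1, capped at max_num_devices, a constant from the zram
 * headers). The count can be chosen at load time; a typical invocation,
 * assuming the module is built as zram.ko, would be
 * "modprobe zram num_devices=4".
 */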
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");