mm/z3fold.c: add structure for buddy handles
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated as 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

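/*
 * Worked example (illustrative, assuming PAGE_SIZE = 4096, i.e.
 * PAGE_SHIFT = 12, and a z3fold header that fits in one chunk):
 *   CHUNK_SHIFT       = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *   ZHDR_SIZE_ALIGNED = round_up(sizeof(struct z3fold_header), 64) = 64
 *   TOTAL_CHUNKS      = 4096 / 64 = 64, ZHDR_CHUNKS = 1
 *   NCHUNKS           = (4096 - 64) / 64 = 63
 */
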
/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)
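
/*
 * Note: slots structures come from a kmem cache created with SLOTS_ALIGN
 * (64-byte) alignment, so the low 6 bits of a slots address are zero.
 * handle_to_slots() below relies on this to recover the structure from a
 * handle that points at one of its slot[] entries.
 */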

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

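/*
 * Note: these flags are kept as bits of page->private of the z3fold page
 * (see the set_bit()/test_bit() calls below), not in page->flags.
 */
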
/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

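/*
 * E.g. (illustrative, assuming CHUNK_SIZE = 64 as computed above):
 * size_to_chunks(100) = (100 + 63) >> 6 = 2 chunks.
 */
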
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							    GFP_KERNEL);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	/* headless handles encode the page address directly, with bit 0
	 * set; they have no slots entry to clear */
	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	/* free the slots structure once its last handle is gone */
	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}

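/*
 * Illustrative layout of an encoded (non-HEADLESS) slot value; the z3fold
 * header sits at the start of its page, so its address is page-aligned:
 *
 *   bits PAGE_SHIFT and up:		address of the z3fold page
 *   bits BUDDY_SHIFT..PAGE_SHIFT-1:	last_chunks (for the LAST buddy only)
 *   bits 0..BUDDY_SHIFT-1:		buddy index, (bud + first_num) & BUDDY_MASK
 *
 * The handle handed back to the caller is the address of the slot holding
 * this value, not the value itself, which is why handle_to_z3fold_header()
 * and friends dereference the handle first.
 */
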
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	unsigned long addr = handle;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)handle;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

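/*
 * Example for num_free_chunks() above (illustrative, TOTAL_CHUNKS = 64,
 * ZHDR_CHUNKS = 1): a page with first_chunks = 0, last_chunks = 0,
 * start_middle = 10 and middle_chunks = 20 has nfree_before = 10 - 1 = 9
 * and nfree_after = 64 - (10 + 20) = 34, so 34 is returned.
 */
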
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

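/*
 * Illustrative z3fold_compact_page() case (ZHDR_CHUNKS = 1): a page with
 * first_chunks = 4, last_chunks = 0 and start_middle = 20 has a gap of
 * 20 - (4 + 1) = 15 chunks >= BIG_CHUNK_GAP, so the middle buddy is moved
 * down to start_middle = 5, merging the free space into one tail region.
 */
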
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to take the pool lock again */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				  zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

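/*
 * Illustrative sketch of use through the zpool API (z3fold exports no API
 * of its own); "mypool", my_zpool_ops, src and len are hypothetical and
 * error handling is elided:
 *
 *	struct zpool *zpool = zpool_create_pool("z3fold", "mypool",
 *						GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *	void *dst;
 *
 *	zpool_malloc(zpool, len, GFP_KERNEL, &handle);
 *	dst = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
 *	memcpy(dst, src, len);
 *	zpool_unmap_handle(zpool, handle);
 *	...
 *	zpool_free(zpool, handle);
 *	zpool_destroy_pool(zpool);
 */
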
static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");