Commit | Line | Data |
---|---|---|
61989a80 NG |
1 | /* |
2 | * zsmalloc memory allocator | |
3 | * | |
4 | * Copyright (C) 2011 Nitin Gupta | |
5 | * | |
6 | * This code is released using a dual license strategy: BSD/GPL | |
7 | * You can choose the license that better fits your requirements. | |
8 | * | |
9 | * Released under the terms of 3-clause BSD License | |
10 | * Released under the terms of GNU General Public License Version 2.0 | |
11 | */ | |
12 | ||
2db51dae NG |
13 | |
14 | /* | |
15 | * This allocator is designed for use with zcache and zram. Thus, the | |
16 | * allocator is supposed to work well under low memory conditions. In | |
17 | * particular, it never attempts higher order page allocation which is | |
18 | * very likely to fail under memory pressure. On the other hand, if we | |
19 | * just use single (0-order) pages, it would suffer from very high | |
20 | * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy | |
21 | * an entire page. This was one of the major issues with its predecessor | |
22 | * (xvmalloc). | |
23 | * | |
24 | * To overcome these issues, zsmalloc allocates a bunch of 0-order pages | |
25 | * and links them together using various 'struct page' fields. These linked | |
26 | * pages act as a single higher-order page i.e. an object can span 0-order | |
27 | * page boundaries. The code refers to these linked pages as a single entity | |
28 | * called zspage. | |
29 | * | |
30 | * The following is how we use various fields and flags of the underlying | |
31 | * struct page(s) to form a zspage. | |
32 | * | |
33 | * Usage of struct page fields: | |
34 | * page->first_page: points to the first component (0-order) page | |
35 | * page->index (union with page->freelist): offset of the first object | |
36 | * starting in this page. For the first page, this is | |
37 | * always 0, so we use this field (aka freelist) to point | |
38 | * to the first free object in zspage. | |
39 | * page->lru: links together all component pages (except the first page) | |
40 | * of a zspage | |
41 | * | |
42 | * For _first_ page only: | |
43 | * | |
44 | * page->private (union with page->first_page): refers to the | |
45 | * component page after the first page | |
46 | * page->freelist: points to the first free object in zspage. | |
47 | * Free objects are linked together using in-place | |
48 | * metadata. | |
49 | * page->objects: maximum number of objects we can store in this | |
50 | * zspage (class->pages_per_zspage * PAGE_SIZE / class->size) | |
51 | * page->lru: links together first pages of various zspages. | |
52 | * Basically forming a list of zspages in a fullness group. | |
53 | * page->mapping: class index and fullness group of the zspage | |
54 | * | |
55 | * Usage of struct page flags: | |
56 | * PG_private: identifies the first component page | |
57 | * PG_private2: identifies the last component page | |
58 | * | |
59 | */ | |
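As a reading aid, the sketch below (not part of the file) walks every component page of a zspage using only the fields and flags documented above; the helpers defined later (`is_first_page()`, `get_next_page()`) implement this same traversal.

```c
/*
 * Illustrative sketch only: visit each component page of a zspage,
 * relying solely on the struct page usage documented above.
 */
static void walk_zspage(struct page *first_page)
{
	struct page *page = first_page;

	while (page) {
		/* ... inspect 'page' here ... */
		if (PagePrivate2(page))		/* last component page */
			page = NULL;
		else if (PagePrivate(page))	/* head: ->private holds 2nd page */
			page = (struct page *)page->private;
		else				/* middle pages chain via ->lru */
			page = list_entry(page->lru.next, struct page, lru);
	}
}
```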
60 | ||
61989a80 NG |
61 | #ifdef CONFIG_ZSMALLOC_DEBUG |
62 | #define DEBUG | |
63 | #endif | |
64 | ||
65 | #include <linux/module.h> | |
66 | #include <linux/kernel.h> | |
67 | #include <linux/bitops.h> | |
68 | #include <linux/errno.h> | |
69 | #include <linux/highmem.h> | |
70 | #include <linux/init.h> | |
71 | #include <linux/string.h> | |
72 | #include <linux/slab.h> | |
73 | #include <asm/tlbflush.h> | |
74 | #include <asm/pgtable.h> | |
75 | #include <linux/cpumask.h> | |
76 | #include <linux/cpu.h> | |
0cbb613f | 77 | #include <linux/vmalloc.h> |
c60369f0 | 78 | #include <linux/hardirq.h> |
0959c63f SJ |
79 | #include <linux/spinlock.h> |
80 | #include <linux/types.h> | |
61989a80 NG |
81 | |
82 | #include "zsmalloc.h" | |
0959c63f SJ |
83 | |
84 | /* | |
85 | * This must be a power of 2 and greater than or equal to sizeof(link_free). | |
86 | * These two conditions ensure that any 'struct link_free' itself doesn't | |
87 | * span more than 1 page, which avoids the complex case of mapping 2 pages | |
88 | * simply to restore link_free pointer values. | |
89 | */ | |
90 | #define ZS_ALIGN 8 | |
91 | ||
92 | /* | |
93 | * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single) | |
94 | * pages. ZS_MAX_ZSPAGE_ORDER defines the upper limit on N. | |
95 | */ | |
96 | #define ZS_MAX_ZSPAGE_ORDER 2 | |
97 | #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) | |
98 | ||
99 | /* | |
100 | * Object location (<PFN>, <obj_idx>) is encoded as | |
101 | * a single (void *) handle value. | |
102 | * | |
103 | * Note that object index <obj_idx> is relative to the system | |
104 | * page <PFN> it is stored in, so for each sub-page belonging | |
105 | * to a zspage, obj_idx starts at 0. | |
106 | * | |
107 | * This is made more complicated by various memory models and PAE. | |
108 | */ | |
109 | ||
110 | #ifndef MAX_PHYSMEM_BITS | |
111 | #ifdef CONFIG_HIGHMEM64G | |
112 | #define MAX_PHYSMEM_BITS 36 | |
113 | #else /* !CONFIG_HIGHMEM64G */ | |
114 | /* | |
115 | * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just | |
116 | * be PAGE_SHIFT | |
117 | */ | |
118 | #define MAX_PHYSMEM_BITS BITS_PER_LONG | |
119 | #endif | |
120 | #endif | |
121 | #define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) | |
122 | #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS) | |
123 | #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) | |
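To make the encoding concrete, here is a hedged worked example under common assumptions (64-bit, 4K pages, MAX_PHYSMEM_BITS == BITS_PER_LONG); the helper below is illustrative only and mirrors what obj_location_to_handle() and obj_handle_to_location() do further down.

```c
/*
 * Worked example (assumes BITS_PER_LONG == 64 and PAGE_SHIFT == 12):
 *   _PFN_BITS      = 64 - 12 = 52
 *   OBJ_INDEX_BITS = 64 - 52 = 12    (== PAGE_SHIFT, as noted above)
 *   OBJ_INDEX_MASK = 0xfff
 *
 * A handle thus packs the PFN into the upper 52 bits and the object
 * index into the lower 12 bits:
 */
static inline unsigned long example_handle(unsigned long pfn,
					   unsigned long obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}
```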
124 | ||
125 | #define MAX(a, b) ((a) >= (b) ? (a) : (b)) | |
126 | /* ZS_MIN_ALLOC_SIZE must be a multiple of ZS_ALIGN */ | |
127 | #define ZS_MIN_ALLOC_SIZE \ | |
128 | MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS)) | |
129 | #define ZS_MAX_ALLOC_SIZE PAGE_SIZE | |
130 | ||
131 | /* | |
132 | * On systems with 4K page size, this gives 254 size classes! There is a | |
133 | * trade-off here: | |
134 | * - A large number of size classes is potentially wasteful as free pages are | |
135 | * spread across these classes | |
136 | * - A small number of size classes causes large internal fragmentation | |
137 | * - Probably it's better to use specific size classes (empirically | |
138 | * determined). NOTE: all those class sizes must be set as a multiple of | |
139 | * ZS_ALIGN to make sure link_free itself never has to span 2 pages. | |
140 | * | |
141 | * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN | |
142 | * (reason above) | |
143 | */ | |
d662b8eb | 144 | #define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8) |
0959c63f SJ |
145 | #define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \ |
146 | ZS_SIZE_CLASS_DELTA + 1) | |
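Plugging in typical values makes the class geometry concrete; the numbers below assume 4K pages and OBJ_INDEX_BITS == PAGE_SHIFT, and shift with other configurations.

```c
/*
 * Worked example (4K pages, OBJ_INDEX_BITS == PAGE_SHIFT):
 *   ZS_SIZE_CLASS_DELTA = 4096 >> 8                = 16
 *   ZS_MIN_ALLOC_SIZE   = MAX(32, (4 << 12) >> 12) = 32
 *   ZS_SIZE_CLASSES     = (4096 - 32) / 16 + 1     = 255
 * (the ~254 classes the comment above refers to). Class 0 serves all
 * sizes up to 32 bytes; class i serves (32 + (i-1)*16, 32 + i*16].
 */
```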
147 | ||
148 | /* | |
150 | * We do not maintain any list for completely empty or full zspages | |
150 | */ | |
151 | enum fullness_group { | |
152 | ZS_ALMOST_FULL, | |
153 | ZS_ALMOST_EMPTY, | |
154 | _ZS_NR_FULLNESS_GROUPS, | |
155 | ||
156 | ZS_EMPTY, | |
157 | ZS_FULL | |
158 | }; | |
159 | ||
160 | /* | |
161 | * We assign a zspage to the ZS_ALMOST_EMPTY fullness group when: | |
162 | * n <= N / f, where | |
163 | * n = number of allocated objects | |
164 | * N = total number of objects zspage can store | |
165 | * f = 1/fullness_threshold_frac | |
166 | * | |
167 | * Similarly, we assign a zspage to: | |
168 | * ZS_ALMOST_FULL when n > N / f | |
169 | * ZS_EMPTY when n == 0 | |
170 | * ZS_FULL when n == N | |
171 | * | |
172 | * (see: fix_fullness_group()) | |
173 | */ | |
174 | static const int fullness_threshold_frac = 4; | |
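For example (hypothetical numbers), a zspage that can hold N = 128 objects is classified by get_fullness_group() below as:

```c
/*
 * With fullness_threshold_frac = 4 and N = 128 (so N / f = 32):
 *   n == 0           -> ZS_EMPTY
 *   1 <= n <= 32     -> ZS_ALMOST_EMPTY
 *   33 <= n <= 127   -> ZS_ALMOST_FULL
 *   n == 128         -> ZS_FULL
 */
```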
175 | ||
176 | struct size_class { | |
177 | /* | |
178 | * Size of objects stored in this class. Must be a multiple | |
179 | * of ZS_ALIGN. | |
180 | */ | |
181 | int size; | |
182 | unsigned int index; | |
183 | ||
184 | /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */ | |
185 | int pages_per_zspage; | |
186 | ||
187 | spinlock_t lock; | |
188 | ||
189 | /* stats */ | |
190 | u64 pages_allocated; | |
191 | ||
192 | struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; | |
193 | }; | |
194 | ||
195 | /* | |
196 | * Placed within free objects to form a singly linked list. | |
197 | * For every zspage, first_page->freelist gives the head of this list. | |
198 | * | |
199 | * This must be a power of 2 and less than or equal to ZS_ALIGN | |
200 | */ | |
201 | struct link_free { | |
202 | /* Handle of next free chunk (encodes <PFN, obj_idx>) */ | |
203 | void *next; | |
204 | }; | |
205 | ||
206 | struct zs_pool { | |
207 | struct size_class size_class[ZS_SIZE_CLASSES]; | |
208 | ||
209 | gfp_t flags; /* allocation flags used when growing pool */ | |
0959c63f | 210 | }; |
61989a80 NG |
211 | |
212 | /* | |
213 | * A zspage's class index and fullness group | |
214 | * are encoded in its (first)page->mapping | |
215 | */ | |
216 | #define CLASS_IDX_BITS 28 | |
217 | #define FULLNESS_BITS 4 | |
218 | #define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1) | |
219 | #define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1) | |
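A worked example of the packing (values chosen arbitrarily): class_idx 42 with fullness ZS_ALMOST_EMPTY (enum value 1).

```c
/*
 * m = (42 << FULLNESS_BITS) | 1 = (42 << 4) | 1 = 0x2a1
 * set_zspage_mapping() stores (struct address_space *)0x2a1 in
 * first_page->mapping; get_zspage_mapping() reverses this:
 *   fullness  =  0x2a1       & FULLNESS_MASK  = 1
 *   class_idx = (0x2a1 >> 4) & CLASS_IDX_MASK = 42
 * The pointer is never dereferenced while zsmalloc owns the page,
 * so ->mapping can safely double as a scalar store.
 */
```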
220 | ||
f553646a SJ |
221 | /* |
222 | * By default, zsmalloc uses a copy-based object mapping method to access | |
223 | * allocations that span two pages. However, if a particular architecture | |
99155188 MK |
224 | * performs VM mapping faster than copying, then it should be added here |
225 | * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use | |
226 | * page table mapping rather than copying for object mapping. | |
f553646a SJ |
227 | */ |
228 | #if defined(CONFIG_ARM) | |
229 | #define USE_PGTABLE_MAPPING | |
230 | #endif | |
231 | ||
232 | struct mapping_area { | |
233 | #ifdef USE_PGTABLE_MAPPING | |
234 | struct vm_struct *vm; /* vm area for mapping objects that span pages */ |
235 | #else | |
236 | char *vm_buf; /* copy buffer for objects that span pages */ | |
237 | #endif | |
238 | char *vm_addr; /* address of kmap_atomic()'ed pages */ | |
239 | enum zs_mapmode vm_mm; /* mapping mode */ | |
240 | }; | |
241 | ||
242 | ||
61989a80 NG |
243 | /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ |
244 | static DEFINE_PER_CPU(struct mapping_area, zs_map_area); | |
245 | ||
246 | static int is_first_page(struct page *page) | |
247 | { | |
a27545bf | 248 | return PagePrivate(page); |
61989a80 NG |
249 | } |
250 | ||
251 | static int is_last_page(struct page *page) | |
252 | { | |
a27545bf | 253 | return PagePrivate2(page); |
61989a80 NG |
254 | } |
255 | ||
256 | static void get_zspage_mapping(struct page *page, unsigned int *class_idx, | |
257 | enum fullness_group *fullness) | |
258 | { | |
259 | unsigned long m; | |
260 | BUG_ON(!is_first_page(page)); | |
261 | ||
262 | m = (unsigned long)page->mapping; | |
263 | *fullness = m & FULLNESS_MASK; | |
264 | *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK; | |
265 | } | |
266 | ||
267 | static void set_zspage_mapping(struct page *page, unsigned int class_idx, | |
268 | enum fullness_group fullness) | |
269 | { | |
270 | unsigned long m; | |
271 | BUG_ON(!is_first_page(page)); | |
272 | ||
273 | m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) | | |
274 | (fullness & FULLNESS_MASK); | |
275 | page->mapping = (struct address_space *)m; | |
276 | } | |
277 | ||
278 | static int get_size_class_index(int size) | |
279 | { | |
280 | int idx = 0; | |
281 | ||
282 | if (likely(size > ZS_MIN_ALLOC_SIZE)) | |
283 | idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, | |
284 | ZS_SIZE_CLASS_DELTA); | |
285 | ||
286 | return idx; | |
287 | } | |
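A few sample lookups, assuming ZS_MIN_ALLOC_SIZE == 32 and ZS_SIZE_CLASS_DELTA == 16 as derived above:

```c
/*
 * get_size_class_index(20)   -> 0   (size <= ZS_MIN_ALLOC_SIZE)
 * get_size_class_index(100)  -> DIV_ROUND_UP(68, 16)   = 5
 * get_size_class_index(4096) -> DIV_ROUND_UP(4064, 16) = 254
 * The selected class stores objects of 32 + idx * 16 bytes: the
 * smallest class size that still fits the request.
 */
```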
288 | ||
289 | static enum fullness_group get_fullness_group(struct page *page) | |
290 | { | |
291 | int inuse, max_objects; | |
292 | enum fullness_group fg; | |
293 | BUG_ON(!is_first_page(page)); | |
294 | ||
295 | inuse = page->inuse; | |
296 | max_objects = page->objects; | |
297 | ||
298 | if (inuse == 0) | |
299 | fg = ZS_EMPTY; | |
300 | else if (inuse == max_objects) | |
301 | fg = ZS_FULL; | |
302 | else if (inuse <= max_objects / fullness_threshold_frac) | |
303 | fg = ZS_ALMOST_EMPTY; | |
304 | else | |
305 | fg = ZS_ALMOST_FULL; | |
306 | ||
307 | return fg; | |
308 | } | |
309 | ||
310 | static void insert_zspage(struct page *page, struct size_class *class, | |
311 | enum fullness_group fullness) | |
312 | { | |
313 | struct page **head; | |
314 | ||
315 | BUG_ON(!is_first_page(page)); | |
316 | ||
317 | if (fullness >= _ZS_NR_FULLNESS_GROUPS) | |
318 | return; | |
319 | ||
320 | head = &class->fullness_list[fullness]; | |
321 | if (*head) | |
322 | list_add_tail(&page->lru, &(*head)->lru); | |
323 | ||
324 | *head = page; | |
325 | } | |
326 | ||
327 | static void remove_zspage(struct page *page, struct size_class *class, | |
328 | enum fullness_group fullness) | |
329 | { | |
330 | struct page **head; | |
331 | ||
332 | BUG_ON(!is_first_page(page)); | |
333 | ||
334 | if (fullness >= _ZS_NR_FULLNESS_GROUPS) | |
335 | return; | |
336 | ||
337 | head = &class->fullness_list[fullness]; | |
338 | BUG_ON(!*head); | |
339 | if (list_empty(&(*head)->lru)) | |
340 | *head = NULL; | |
341 | else if (*head == page) | |
342 | *head = (struct page *)list_entry((*head)->lru.next, | |
343 | struct page, lru); | |
344 | ||
345 | list_del_init(&page->lru); | |
346 | } | |
347 | ||
348 | static enum fullness_group fix_fullness_group(struct zs_pool *pool, | |
349 | struct page *page) | |
350 | { | |
351 | int class_idx; | |
352 | struct size_class *class; | |
353 | enum fullness_group currfg, newfg; | |
354 | ||
355 | BUG_ON(!is_first_page(page)); | |
356 | ||
357 | get_zspage_mapping(page, &class_idx, &currfg); | |
358 | newfg = get_fullness_group(page); | |
359 | if (newfg == currfg) | |
360 | goto out; | |
361 | ||
362 | class = &pool->size_class[class_idx]; | |
363 | remove_zspage(page, class, currfg); | |
364 | insert_zspage(page, class, newfg); | |
365 | set_zspage_mapping(page, class_idx, newfg); | |
366 | ||
367 | out: | |
368 | return newfg; | |
369 | } | |
370 | ||
371 | /* | |
372 | * We have to decide on how many pages to link together | |
373 | * to form a zspage for each size class. This is important | |
374 | * to reduce wastage due to the unusable space left at the end of | |
375 | * each zspage, which is given as: | |
376 | * wastage = Zp % class_size | |
377 | * where Zp = zspage size = k * PAGE_SIZE, where k = 1, 2, ... | |
378 | * | |
379 | * For example, for size class of 3/8 * PAGE_SIZE, we should | |
380 | * link together 3 PAGE_SIZE sized pages to form a zspage | |
381 | * since then we can perfectly fit in 8 such objects. | |
382 | */ | |
2e3b6154 | 383 | static int get_pages_per_zspage(int class_size) |
61989a80 NG |
384 | { |
385 | int i, max_usedpc = 0; | |
386 | /* zspage order which gives maximum used size per KB */ | |
387 | int max_usedpc_order = 1; | |
388 | ||
84d4faab | 389 | for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { |
61989a80 NG |
390 | int zspage_size; |
391 | int waste, usedpc; | |
392 | ||
393 | zspage_size = i * PAGE_SIZE; | |
394 | waste = zspage_size % class_size; | |
395 | usedpc = (zspage_size - waste) * 100 / zspage_size; | |
396 | ||
397 | if (usedpc > max_usedpc) { | |
398 | max_usedpc = usedpc; | |
399 | max_usedpc_order = i; | |
400 | } | |
401 | } | |
402 | ||
403 | return max_usedpc_order; | |
404 | } | |
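Tracing the loop for the example in the comment above (class_size = 3/8 * PAGE_SIZE = 1536 on a 4K-page system):

```c
/*
 * i = 1: waste = 4096  % 1536 = 1024  -> usedpc = 75
 * i = 2: waste = 8192  % 1536 = 512   -> usedpc = 93
 * i = 3: waste = 12288 % 1536 = 0     -> usedpc = 100
 * i = 4: waste = 16384 % 1536 = 1024  -> usedpc = 93
 * get_pages_per_zspage(1536) therefore returns 3: a 3-page zspage
 * holds exactly 8 such objects with no tail waste.
 */
```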
405 | ||
406 | /* | |
407 | * A single 'zspage' is composed of many system pages which are | |
408 | * linked together using fields in struct page. This function finds | |
409 | * the first/head page, given any component page of a zspage. | |
410 | */ | |
411 | static struct page *get_first_page(struct page *page) | |
412 | { | |
413 | if (is_first_page(page)) | |
414 | return page; | |
415 | else | |
416 | return page->first_page; | |
417 | } | |
418 | ||
419 | static struct page *get_next_page(struct page *page) | |
420 | { | |
421 | struct page *next; | |
422 | ||
423 | if (is_last_page(page)) | |
424 | next = NULL; | |
425 | else if (is_first_page(page)) | |
426 | next = (struct page *)page->private; | |
427 | else | |
428 | next = list_entry(page->lru.next, struct page, lru); | |
429 | ||
430 | return next; | |
431 | } | |
432 | ||
433 | /* Encode <page, obj_idx> as a single handle value */ | |
434 | static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) | |
435 | { | |
436 | unsigned long handle; | |
437 | ||
438 | if (!page) { | |
439 | BUG_ON(obj_idx); | |
440 | return NULL; | |
441 | } | |
442 | ||
443 | handle = page_to_pfn(page) << OBJ_INDEX_BITS; | |
444 | handle |= (obj_idx & OBJ_INDEX_MASK); | |
445 | ||
446 | return (void *)handle; | |
447 | } | |
448 | ||
449 | /* Decode <page, obj_idx> pair from the given object handle */ | |
c2344348 | 450 | static void obj_handle_to_location(unsigned long handle, struct page **page, |
61989a80 NG |
451 | unsigned long *obj_idx) |
452 | { | |
c2344348 MK |
453 | *page = pfn_to_page(handle >> OBJ_INDEX_BITS); |
454 | *obj_idx = handle & OBJ_INDEX_MASK; | |
61989a80 NG |
455 | } |
456 | ||
457 | static unsigned long obj_idx_to_offset(struct page *page, | |
458 | unsigned long obj_idx, int class_size) | |
459 | { | |
460 | unsigned long off = 0; | |
461 | ||
462 | if (!is_first_page(page)) | |
463 | off = page->index; | |
464 | ||
465 | return off + obj_idx * class_size; | |
466 | } | |
467 | ||
f4477e90 NG |
468 | static void reset_page(struct page *page) |
469 | { | |
470 | clear_bit(PG_private, &page->flags); | |
471 | clear_bit(PG_private_2, &page->flags); | |
472 | set_page_private(page, 0); | |
473 | page->mapping = NULL; | |
474 | page->freelist = NULL; | |
475 | reset_page_mapcount(page); | |
476 | } | |
477 | ||
61989a80 NG |
478 | static void free_zspage(struct page *first_page) |
479 | { | |
f4477e90 | 480 | struct page *nextp, *tmp, *head_extra; |
61989a80 NG |
481 | |
482 | BUG_ON(!is_first_page(first_page)); | |
483 | BUG_ON(first_page->inuse); | |
484 | ||
f4477e90 | 485 | head_extra = (struct page *)page_private(first_page); |
61989a80 | 486 | |
f4477e90 | 487 | reset_page(first_page); |
61989a80 NG |
488 | __free_page(first_page); |
489 | ||
490 | /* zspage with only 1 system page */ | |
f4477e90 | 491 | if (!head_extra) |
61989a80 NG |
492 | return; |
493 | ||
f4477e90 | 494 | list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) { |
61989a80 | 495 | list_del(&nextp->lru); |
f4477e90 | 496 | reset_page(nextp); |
61989a80 NG |
497 | __free_page(nextp); |
498 | } | |
f4477e90 NG |
499 | reset_page(head_extra); |
500 | __free_page(head_extra); | |
61989a80 NG |
501 | } |
502 | ||
503 | /* Initialize a newly allocated zspage */ | |
504 | static void init_zspage(struct page *first_page, struct size_class *class) | |
505 | { | |
506 | unsigned long off = 0; | |
507 | struct page *page = first_page; | |
508 | ||
509 | BUG_ON(!is_first_page(first_page)); | |
510 | while (page) { | |
511 | struct page *next_page; | |
512 | struct link_free *link; | |
513 | unsigned int i, objs_on_page; | |
514 | ||
515 | /* | |
516 | * page->index stores the offset of the first object starting | |
517 | * in the page. For the first page, this is always 0, | |
518 | * so we use first_page->index (aka ->freelist) to store | |
519 | * head of corresponding zspage's freelist. | |
520 | */ | |
521 | if (page != first_page) | |
522 | page->index = off; | |
523 | ||
524 | link = (struct link_free *)kmap_atomic(page) + | |
525 | off / sizeof(*link); | |
526 | objs_on_page = (PAGE_SIZE - off) / class->size; | |
527 | ||
528 | for (i = 1; i <= objs_on_page; i++) { | |
529 | off += class->size; | |
530 | if (off < PAGE_SIZE) { | |
531 | link->next = obj_location_to_handle(page, i); | |
532 | link += class->size / sizeof(*link); | |
533 | } | |
534 | } | |
535 | ||
536 | /* | |
537 | * We now come to the last (full or partial) object on this | |
538 | * page, which must point to the first object on the next | |
539 | * page (if present) | |
540 | */ | |
541 | next_page = get_next_page(page); | |
542 | link->next = obj_location_to_handle(next_page, 0); | |
543 | kunmap_atomic(link); | |
544 | page = next_page; | |
545 | off = (off + class->size) % PAGE_SIZE; | |
546 | } | |
547 | } | |
548 | ||
549 | /* | |
550 | * Allocate a zspage for the given size class | |
551 | */ | |
552 | static struct page *alloc_zspage(struct size_class *class, gfp_t flags) | |
553 | { | |
554 | int i, error; | |
b4b700c5 | 555 | struct page *first_page = NULL, *uninitialized_var(prev_page); |
61989a80 NG |
556 | |
557 | /* | |
558 | * Allocate individual pages and link them together as: | |
559 | * 1. first page->private = first sub-page | |
560 | * 2. all sub-pages are linked together using page->lru | |
561 | * 3. each sub-page is linked to the first page using page->first_page | |
562 | * | |
563 | * For each size class, First/Head pages are linked together using | |
564 | * page->lru. Also, we set PG_private to identify the first page | |
565 | * (i.e. no other sub-page has this flag set) and PG_private_2 to | |
566 | * identify the last page. | |
567 | */ | |
568 | error = -ENOMEM; | |
2e3b6154 | 569 | for (i = 0; i < class->pages_per_zspage; i++) { |
b4b700c5 | 570 | struct page *page; |
61989a80 NG |
571 | |
572 | page = alloc_page(flags); | |
573 | if (!page) | |
574 | goto cleanup; | |
575 | ||
576 | INIT_LIST_HEAD(&page->lru); | |
577 | if (i == 0) { /* first page */ | |
a27545bf | 578 | SetPagePrivate(page); |
61989a80 NG |
579 | set_page_private(page, 0); |
580 | first_page = page; | |
581 | first_page->inuse = 0; | |
582 | } | |
583 | if (i == 1) | |
584 | first_page->private = (unsigned long)page; | |
585 | if (i >= 1) | |
586 | page->first_page = first_page; | |
587 | if (i >= 2) | |
588 | list_add(&page->lru, &prev_page->lru); | |
2e3b6154 | 589 | if (i == class->pages_per_zspage - 1) /* last page */ |
a27545bf | 590 | SetPagePrivate2(page); |
61989a80 NG |
591 | prev_page = page; |
592 | } | |
593 | ||
594 | init_zspage(first_page, class); | |
595 | ||
596 | first_page->freelist = obj_location_to_handle(first_page, 0); | |
597 | /* Maximum number of objects we can store in this zspage */ | |
2e3b6154 | 598 | first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size; |
61989a80 NG |
599 | |
600 | error = 0; /* Success */ | |
601 | ||
602 | cleanup: | |
603 | if (unlikely(error) && first_page) { | |
604 | free_zspage(first_page); | |
605 | first_page = NULL; | |
606 | } | |
607 | ||
608 | return first_page; | |
609 | } | |
610 | ||
611 | static struct page *find_get_zspage(struct size_class *class) | |
612 | { | |
613 | int i; | |
614 | struct page *page; | |
615 | ||
616 | for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { | |
617 | page = class->fullness_list[i]; | |
618 | if (page) | |
619 | break; | |
620 | } | |
621 | ||
622 | return page; | |
623 | } | |
624 | ||
f553646a SJ |
625 | #ifdef USE_PGTABLE_MAPPING |
626 | static inline int __zs_cpu_up(struct mapping_area *area) | |
627 | { | |
628 | /* | |
629 | * Make sure we don't leak memory if a cpu UP notification | |
630 | * and zs_init() race and both call zs_cpu_up() on the same cpu | |
631 | */ | |
632 | if (area->vm) | |
633 | return 0; | |
634 | area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); | |
635 | if (!area->vm) | |
636 | return -ENOMEM; | |
637 | return 0; | |
638 | } | |
639 | ||
640 | static inline void __zs_cpu_down(struct mapping_area *area) | |
641 | { | |
642 | if (area->vm) | |
643 | free_vm_area(area->vm); | |
644 | area->vm = NULL; | |
645 | } | |
646 | ||
647 | static inline void *__zs_map_object(struct mapping_area *area, | |
648 | struct page *pages[2], int off, int size) | |
649 | { | |
650 | BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages)); | |
651 | area->vm_addr = area->vm->addr; | |
652 | return area->vm_addr + off; | |
653 | } | |
654 | ||
655 | static inline void __zs_unmap_object(struct mapping_area *area, | |
656 | struct page *pages[2], int off, int size) | |
657 | { | |
658 | unsigned long addr = (unsigned long)area->vm_addr; | |
659 | unsigned long end = addr + (PAGE_SIZE * 2); | |
660 | ||
661 | flush_cache_vunmap(addr, end); | |
662 | unmap_kernel_range_noflush(addr, PAGE_SIZE * 2); | |
99155188 | 663 | flush_tlb_kernel_range(addr, end); |
f553646a SJ |
664 | } |
665 | ||
666 | #else /* USE_PGTABLE_MAPPING */ | |
667 | ||
668 | static inline int __zs_cpu_up(struct mapping_area *area) | |
669 | { | |
670 | /* | |
671 | * Make sure we don't leak memory if a cpu UP notification | |
672 | * and zs_init() race and both call zs_cpu_up() on the same cpu | |
673 | */ | |
674 | if (area->vm_buf) | |
675 | return 0; | |
676 | area->vm_buf = (char *)__get_free_page(GFP_KERNEL); | |
677 | if (!area->vm_buf) | |
678 | return -ENOMEM; | |
679 | return 0; | |
680 | } | |
681 | ||
682 | static inline void __zs_cpu_down(struct mapping_area *area) | |
683 | { | |
684 | if (area->vm_buf) | |
685 | free_page((unsigned long)area->vm_buf); | |
686 | area->vm_buf = NULL; | |
687 | } | |
688 | ||
689 | static void *__zs_map_object(struct mapping_area *area, | |
690 | struct page *pages[2], int off, int size) | |
5f601902 | 691 | { |
5f601902 SJ |
692 | int sizes[2]; |
693 | void *addr; | |
f553646a | 694 | char *buf = area->vm_buf; |
5f601902 | 695 | |
f553646a SJ |
696 | /* disable page faults to match kmap_atomic() return conditions */ |
697 | pagefault_disable(); | |
698 | ||
699 | /* no read fastpath */ | |
700 | if (area->vm_mm == ZS_MM_WO) | |
701 | goto out; | |
5f601902 SJ |
702 | |
703 | sizes[0] = PAGE_SIZE - off; | |
704 | sizes[1] = size - sizes[0]; | |
705 | ||
5f601902 SJ |
706 | /* copy object to per-cpu buffer */ |
707 | addr = kmap_atomic(pages[0]); | |
708 | memcpy(buf, addr + off, sizes[0]); | |
709 | kunmap_atomic(addr); | |
710 | addr = kmap_atomic(pages[1]); | |
711 | memcpy(buf + sizes[0], addr, sizes[1]); | |
712 | kunmap_atomic(addr); | |
f553646a SJ |
713 | out: |
714 | return area->vm_buf; | |
5f601902 SJ |
715 | } |
716 | ||
f553646a SJ |
717 | static void __zs_unmap_object(struct mapping_area *area, |
718 | struct page *pages[2], int off, int size) | |
5f601902 | 719 | { |
5f601902 SJ |
720 | int sizes[2]; |
721 | void *addr; | |
f553646a | 722 | char *buf = area->vm_buf; |
5f601902 | 723 | |
f553646a SJ |
724 | /* no write fastpath */ |
725 | if (area->vm_mm == ZS_MM_RO) | |
726 | goto out; | |
5f601902 SJ |
727 | |
728 | sizes[0] = PAGE_SIZE - off; | |
729 | sizes[1] = size - sizes[0]; | |
730 | ||
731 | /* copy per-cpu buffer to object */ | |
732 | addr = kmap_atomic(pages[0]); | |
733 | memcpy(addr + off, buf, sizes[0]); | |
734 | kunmap_atomic(addr); | |
735 | addr = kmap_atomic(pages[1]); | |
736 | memcpy(addr, buf + sizes[0], sizes[1]); | |
737 | kunmap_atomic(addr); | |
f553646a SJ |
738 | |
739 | out: | |
740 | /* enable page faults to match kunmap_atomic() return conditions */ | |
741 | pagefault_enable(); | |
5f601902 | 742 | } |
61989a80 | 743 | |
f553646a SJ |
744 | #endif /* USE_PGTABLE_MAPPING */ |
745 | ||
61989a80 NG |
746 | static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action, |
747 | void *pcpu) | |
748 | { | |
f553646a | 749 | int ret, cpu = (long)pcpu; |
61989a80 NG |
750 | struct mapping_area *area; |
751 | ||
752 | switch (action) { | |
753 | case CPU_UP_PREPARE: | |
754 | area = &per_cpu(zs_map_area, cpu); | |
f553646a SJ |
755 | ret = __zs_cpu_up(area); |
756 | if (ret) | |
757 | return notifier_from_errno(ret); | |
61989a80 NG |
758 | break; |
759 | case CPU_DEAD: | |
760 | case CPU_UP_CANCELED: | |
761 | area = &per_cpu(zs_map_area, cpu); | |
f553646a | 762 | __zs_cpu_down(area); |
61989a80 NG |
763 | break; |
764 | } | |
765 | ||
766 | return NOTIFY_OK; | |
767 | } | |
768 | ||
769 | static struct notifier_block zs_cpu_nb = { | |
770 | .notifier_call = zs_cpu_notifier | |
771 | }; | |
772 | ||
773 | static void zs_exit(void) | |
774 | { | |
775 | int cpu; | |
776 | ||
777 | for_each_online_cpu(cpu) | |
778 | zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); | |
779 | unregister_cpu_notifier(&zs_cpu_nb); | |
780 | } | |
781 | ||
782 | static int zs_init(void) | |
783 | { | |
784 | int cpu, ret; | |
785 | ||
786 | register_cpu_notifier(&zs_cpu_nb); | |
787 | for_each_online_cpu(cpu) { | |
788 | ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | |
789 | if (notifier_to_errno(ret)) | |
790 | goto fail; | |
791 | } | |
792 | return 0; | |
793 | fail: | |
794 | zs_exit(); | |
795 | return notifier_to_errno(ret); | |
796 | } | |
797 | ||
4bbc0bc0 DB |
798 | /** |
799 | * zs_create_pool - Creates an allocation pool to work from. | |
0d145a50 | 800 | * @flags: allocation flags used to allocate pool metadata |
4bbc0bc0 DB |
801 | * |
802 | * This function must be called before anything else when using |
803 | * the zsmalloc allocator. | |
804 | * | |
805 | * On success, a pointer to the newly created pool is returned, | |
806 | * otherwise NULL. | |
807 | */ | |
0d145a50 | 808 | struct zs_pool *zs_create_pool(gfp_t flags) |
61989a80 | 809 | { |
069f101f | 810 | int i, ovhd_size; |
61989a80 NG |
811 | struct zs_pool *pool; |
812 | ||
61989a80 NG |
813 | ovhd_size = roundup(sizeof(*pool), PAGE_SIZE); |
814 | pool = kzalloc(ovhd_size, GFP_KERNEL); | |
815 | if (!pool) | |
816 | return NULL; | |
817 | ||
818 | for (i = 0; i < ZS_SIZE_CLASSES; i++) { | |
819 | int size; | |
820 | struct size_class *class; | |
821 | ||
822 | size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; | |
823 | if (size > ZS_MAX_ALLOC_SIZE) | |
824 | size = ZS_MAX_ALLOC_SIZE; | |
825 | ||
826 | class = &pool->size_class[i]; | |
827 | class->size = size; | |
828 | class->index = i; | |
829 | spin_lock_init(&class->lock); | |
2e3b6154 | 830 | class->pages_per_zspage = get_pages_per_zspage(size); |
61989a80 NG |
831 | |
832 | } | |
833 | ||
61989a80 | 834 | pool->flags = flags; |
61989a80 | 835 | |
61989a80 NG |
836 | return pool; |
837 | } | |
838 | EXPORT_SYMBOL_GPL(zs_create_pool); | |
839 | ||
840 | void zs_destroy_pool(struct zs_pool *pool) | |
841 | { | |
842 | int i; | |
843 | ||
844 | for (i = 0; i < ZS_SIZE_CLASSES; i++) { | |
845 | int fg; | |
846 | struct size_class *class = &pool->size_class[i]; | |
847 | ||
848 | for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) { | |
849 | if (class->fullness_list[fg]) { | |
850 | pr_info("Freeing non-empty class with size " | |
851 | "%db, fullness group %d\n", | |
852 | class->size, fg); | |
853 | } | |
854 | } | |
855 | } | |
856 | kfree(pool); | |
857 | } | |
858 | EXPORT_SYMBOL_GPL(zs_destroy_pool); | |
859 | ||
860 | /** | |
861 | * zs_malloc - Allocate block of given size from pool. | |
862 | * @pool: pool to allocate from | |
863 | * @size: size of block to allocate | |
61989a80 | 864 | * |
00a61d86 | 865 | * On success, a handle to the allocated object is returned, |
c2344348 | 866 | * otherwise 0. |
61989a80 NG |
867 | * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. |
868 | */ | |
c2344348 | 869 | unsigned long zs_malloc(struct zs_pool *pool, size_t size) |
61989a80 | 870 | { |
c2344348 | 871 | unsigned long obj; |
61989a80 NG |
872 | struct link_free *link; |
873 | int class_idx; | |
874 | struct size_class *class; | |
875 | ||
876 | struct page *first_page, *m_page; | |
877 | unsigned long m_objidx, m_offset; | |
878 | ||
879 | if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) | |
c2344348 | 880 | return 0; |
61989a80 NG |
881 | |
882 | class_idx = get_size_class_index(size); | |
883 | class = &pool->size_class[class_idx]; | |
884 | BUG_ON(class_idx != class->index); | |
885 | ||
886 | spin_lock(&class->lock); | |
887 | first_page = find_get_zspage(class); | |
888 | ||
889 | if (!first_page) { | |
890 | spin_unlock(&class->lock); | |
891 | first_page = alloc_zspage(class, pool->flags); | |
892 | if (unlikely(!first_page)) | |
c2344348 | 893 | return 0; |
61989a80 NG |
894 | |
895 | set_zspage_mapping(first_page, class->index, ZS_EMPTY); | |
896 | spin_lock(&class->lock); | |
2e3b6154 | 897 | class->pages_allocated += class->pages_per_zspage; |
61989a80 NG |
898 | } |
899 | ||
c2344348 | 900 | obj = (unsigned long)first_page->freelist; |
61989a80 NG |
901 | obj_handle_to_location(obj, &m_page, &m_objidx); |
902 | m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); | |
903 | ||
904 | link = (struct link_free *)kmap_atomic(m_page) + | |
905 | m_offset / sizeof(*link); | |
906 | first_page->freelist = link->next; | |
907 | memset(link, POISON_INUSE, sizeof(*link)); | |
908 | kunmap_atomic(link); | |
909 | ||
910 | first_page->inuse++; | |
911 | /* Now move the zspage to another fullness group, if required */ | |
912 | fix_fullness_group(pool, first_page); | |
913 | spin_unlock(&class->lock); | |
914 | ||
915 | return obj; | |
916 | } | |
917 | EXPORT_SYMBOL_GPL(zs_malloc); | |
918 | ||
c2344348 | 919 | void zs_free(struct zs_pool *pool, unsigned long obj) |
61989a80 NG |
920 | { |
921 | struct link_free *link; | |
922 | struct page *first_page, *f_page; | |
923 | unsigned long f_objidx, f_offset; | |
924 | ||
925 | int class_idx; | |
926 | struct size_class *class; | |
927 | enum fullness_group fullness; | |
928 | ||
929 | if (unlikely(!obj)) | |
930 | return; | |
931 | ||
932 | obj_handle_to_location(obj, &f_page, &f_objidx); | |
933 | first_page = get_first_page(f_page); | |
934 | ||
935 | get_zspage_mapping(first_page, &class_idx, &fullness); | |
936 | class = &pool->size_class[class_idx]; | |
937 | f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); | |
938 | ||
939 | spin_lock(&class->lock); | |
940 | ||
941 | /* Insert this object in containing zspage's freelist */ | |
942 | link = (struct link_free *)((unsigned char *)kmap_atomic(f_page) | |
943 | + f_offset); | |
944 | link->next = first_page->freelist; | |
945 | kunmap_atomic(link); | |
c2344348 | 946 | first_page->freelist = (void *)obj; |
61989a80 NG |
947 | |
948 | first_page->inuse--; | |
949 | fullness = fix_fullness_group(pool, first_page); | |
950 | ||
951 | if (fullness == ZS_EMPTY) | |
2e3b6154 | 952 | class->pages_allocated -= class->pages_per_zspage; |
61989a80 NG |
953 | |
954 | spin_unlock(&class->lock); | |
955 | ||
956 | if (fullness == ZS_EMPTY) | |
957 | free_zspage(first_page); | |
958 | } | |
959 | EXPORT_SYMBOL_GPL(zs_free); | |
960 | ||
00a61d86 MK |
961 | /** |
962 | * zs_map_object - get address of allocated object from handle. | |
963 | * @pool: pool from which the object was allocated | |
964 | * @handle: handle returned from zs_malloc | |
965 | * | |
966 | * Before using an object allocated from zs_malloc, it must be mapped using | |
967 | * this function. When done with the object, it must be unmapped using | |
166cfda7 SJ |
968 | * zs_unmap_object. |
969 | * | |
970 | * Only one object can be mapped per cpu at a time. There is no protection | |
971 | * against nested mappings. | |
972 | * | |
973 | * This function returns with preemption and page faults disabled. | |
00a61d86 | 974 | */ |
b7418510 SJ |
975 | void *zs_map_object(struct zs_pool *pool, unsigned long handle, |
976 | enum zs_mapmode mm) | |
61989a80 NG |
977 | { |
978 | struct page *page; | |
979 | unsigned long obj_idx, off; | |
980 | ||
981 | unsigned int class_idx; | |
982 | enum fullness_group fg; | |
983 | struct size_class *class; | |
984 | struct mapping_area *area; | |
f553646a | 985 | struct page *pages[2]; |
61989a80 NG |
986 | |
987 | BUG_ON(!handle); | |
988 | ||
c60369f0 SJ |
989 | /* |
990 | * Because we use per-cpu mapping areas shared among the | |
991 | * pools/users, we can't allow mapping in interrupt context | |
992 | * because it can corrupt another user's mappings. | |
993 | */ | |
994 | BUG_ON(in_interrupt()); | |
995 | ||
61989a80 NG |
996 | obj_handle_to_location(handle, &page, &obj_idx); |
997 | get_zspage_mapping(get_first_page(page), &class_idx, &fg); | |
998 | class = &pool->size_class[class_idx]; | |
999 | off = obj_idx_to_offset(page, obj_idx, class->size); | |
1000 | ||
1001 | area = &get_cpu_var(zs_map_area); | |
f553646a | 1002 | area->vm_mm = mm; |
61989a80 NG |
1003 | if (off + class->size <= PAGE_SIZE) { |
1004 | /* this object is contained entirely within a page */ | |
1005 | area->vm_addr = kmap_atomic(page); | |
5f601902 | 1006 | return area->vm_addr + off; |
61989a80 NG |
1007 | } |
1008 | ||
f553646a SJ |
1009 | /* this object spans two pages */ |
1010 | pages[0] = page; | |
1011 | pages[1] = get_next_page(page); | |
1012 | BUG_ON(!pages[1]); | |
b7418510 | 1013 | |
f553646a | 1014 | return __zs_map_object(area, pages, off, class->size); |
61989a80 NG |
1015 | } |
1016 | EXPORT_SYMBOL_GPL(zs_map_object); | |
1017 | ||
c2344348 | 1018 | void zs_unmap_object(struct zs_pool *pool, unsigned long handle) |
61989a80 NG |
1019 | { |
1020 | struct page *page; | |
1021 | unsigned long obj_idx, off; | |
1022 | ||
1023 | unsigned int class_idx; | |
1024 | enum fullness_group fg; | |
1025 | struct size_class *class; | |
1026 | struct mapping_area *area; | |
1027 | ||
1028 | BUG_ON(!handle); | |
1029 | ||
1030 | obj_handle_to_location(handle, &page, &obj_idx); | |
1031 | get_zspage_mapping(get_first_page(page), &class_idx, &fg); | |
1032 | class = &pool->size_class[class_idx]; | |
1033 | off = obj_idx_to_offset(page, obj_idx, class->size); | |
1034 | ||
f553646a SJ |
1035 | area = &__get_cpu_var(zs_map_area); |
1036 | if (off + class->size <= PAGE_SIZE) | |
1037 | kunmap_atomic(area->vm_addr); | |
1038 | else { | |
1039 | struct page *pages[2]; | |
1040 | ||
1041 | pages[0] = page; | |
1042 | pages[1] = get_next_page(page); | |
1043 | BUG_ON(!pages[1]); | |
b7418510 | 1044 | |
f553646a SJ |
1045 | __zs_unmap_object(area, pages, off, class->size); |
1046 | } | |
61989a80 NG |
1047 | put_cpu_var(zs_map_area); |
1048 | } | |
1049 | EXPORT_SYMBOL_GPL(zs_unmap_object); | |
1050 | ||
1051 | u64 zs_get_total_size_bytes(struct zs_pool *pool) | |
1052 | { | |
1053 | int i; | |
1054 | u64 npages = 0; | |
1055 | ||
1056 | for (i = 0; i < ZS_SIZE_CLASSES; i++) | |
1057 | npages += pool->size_class[i].pages_allocated; | |
1058 | ||
1059 | return npages << PAGE_SHIFT; | |
1060 | } | |
1061 | EXPORT_SYMBOL_GPL(zs_get_total_size_bytes); | |
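To tie the exported API together, here is a minimal usage sketch from the point of view of a hypothetical caller (the store pattern zram and zcache follow); example_store() and its GFP flags are illustrative assumptions, not part of this file.

```c
static int example_store(const void *src, size_t len)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *dst;

	/* GFP flags are caller policy; highmem pages are fine here
	 * because objects are only ever accessed via zs_map_object() */
	pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, len);	/* returns 0 on failure */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/*
	 * Map write-only, copy in, unmap promptly: the mapping uses a
	 * per-cpu area and returns with preemption disabled.
	 */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
```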
069f101f BH |
1062 | |
1063 | module_init(zs_init); | |
1064 | module_exit(zs_exit); | |
1065 | ||
1066 | MODULE_LICENSE("Dual BSD/GPL"); | |
1067 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); |