/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
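
/*
 * Illustrative note (not part of the original code): free pages are chained
 * through their struct page "lru" member, so recycling a page is just
 * list_add_tail(&page->lru, &pool->list) and taking one back is
 * list_first_entry(&pool->list, struct page, lru).
 */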

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC        (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION          16
#define FREE_ALL_PAGES            (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL        1000
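
/*
 * Sizing note (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * NUM_PAGES_TO_ALLOC is 4096 / 8 == 512, so the batched alloc/free paths
 * below handle at most 512 pages (2 MiB) per trip through the pool lock.
 */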

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, printed in debugfs output.
 * @nfrees: Statistics: number of pages freed from this pool.
 * @nrefills: Statistics: number of times this pool was refilled.
 */
struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
        gfp_t                   gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};
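
/*
 * Illustrative sketch (not part of the original code): because the shrinker
 * can run concurrently, every touch of pool->list and pool->npages follows
 * the same pattern:
 *
 *      unsigned long irq_flags;
 *
 *      spin_lock_irqsave(&pool->lock, irq_flags);
 *      ... add or remove pages on pool->list, adjust pool->npages ...
 *      spin_unlock_irqrestore(&pool->lock, irq_flags);
 */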

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: kobject used to expose the pool options in sysfs.
 * @mm_shrink: Shrinker callback object; the shrink work only runs when there
 * are pages to free.
 * @options: Limits for the pools; @options.small is the limit, in number of
 * pages, under which an allocation counts as small.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        struct kobject          kobj;
        struct shrinker         mm_shrink;
        struct ttm_pool_opts    options;

        union {
                struct ttm_page_pool    pools[NUM_POOLS];
                struct {
                        struct ttm_page_pool    wc_pool;
                        struct ttm_page_pool    uc_pool;
                        struct ttm_page_pool    wc_pool_dma32;
                        struct ttm_page_pool    uc_pool_dma32;
                };
        };
};

static struct attribute ttm_page_pool_max = {
        .name = "pool_max_size",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
        .name = "pool_small_allocation",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
        .name = "pool_allocation_size",
        .mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
        &ttm_page_pool_max,
        &ttm_page_pool_small,
        &ttm_page_pool_alloc_size,
        NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
                struct attribute *attr, const char *buffer, size_t size)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        int chars;
        unsigned val;

        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}
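
/*
 * Worked example (illustrative, assuming 4 KiB pages): PAGE_SIZE >> 10 == 4,
 * so writing "65536" (kB) to pool_max_size stores 65536 / 4 == 16384 pages,
 * and ttm_pool_show() converts back with 16384 * 4 == 65536 kB.
 */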

static ssize_t ttm_pool_show(struct kobject *kobj,
                struct attribute *attr, char *buffer)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        /* Convert number of pages back to kb */
        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
        .show = &ttm_pool_show,
        .store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
        .release = &ttm_pool_kobj_release,
        .sysfs_ops = &ttm_pool_sysfs_ops,
        .default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                unmap_page_from_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}
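
/*
 * Example mapping (follows from the union layout in struct ttm_pool_manager):
 *      tt_wc                             -> pools[0] (wc_pool)
 *      tt_uncached                       -> pools[1] (uc_pool)
 *      tt_wc + TTM_PAGE_FLAG_DMA32       -> pools[2] (wc_pool_dma32)
 *      tt_uncached + TTM_PAGE_FLAG_DMA32 -> pools[3] (uc_pool_dma32)
 *      tt_cached                         -> no pool (NULL)
 */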

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;

        if (set_pages_array_wb(pages, npages))
                pr_err("Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
                pr_err("Failed to allocate memory for pool free operation\n");
                return 0;
        }

restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        list_for_each_entry_reverse(p, &pool->list, lru) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        __list_del(p->lru.prev, &pool->list);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /*
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break here: the
                         * code that follows expects the pool lock to be
                         * held, and we have already dropped it.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                __list_del(&p->lru, &pool->list);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        kfree(pages_to_free);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;

        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
                              struct shrink_control *sc)
{
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
        manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
        unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;

        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to wc!\n", cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change their caching state; pages for which
 * the change succeeded stay on the caller's pages list.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;

        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        struct page *p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

        if (!caching_array) {
                pr_err("Unable to allocate table for new pages\n");
                return -ENOMEM;
        }

        for (i = 0, cpages = 0; i < count; ++i) {
                p = alloc_page(gfp_flags);

                if (!p) {
                        pr_err("Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such a case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                list_add(&p->lru, pages);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
                unsigned long *irq_flags)
{
        struct page *p;
        int r;
        unsigned cpages = 0;

        /*
         * Only allow one pool fill operation at a time.
         * If pool doesn't have enough pages for the allocation new pages are
         * allocated from outside of pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If allocation request is small and there are not enough
         * pages in a pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct list_head new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /*
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
                                cstate, alloc_size);
                spin_lock_irqsave(&pool->lock, *irq_flags);

                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        pr_err("Failed to fill pool (%p)\n", pool);
                        /* If we have any pages left put them to the pool.
                         * Count the new_pages list, not pool->list, so the
                         * partial allocation is accounted correctly. */
                        list_for_each_entry(p, &new_pages, lru) {
                                ++cpages;
                        }
                        list_splice(&new_pages, &pool->list);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                                        struct list_head *pages,
                                        int ttm_flags,
                                        enum ttm_caching_state cstate,
                                        unsigned count)
{
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;

        spin_lock_irqsave(&pool->lock, irq_flags);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                list_splice_init(&pool->list, pages);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        /* Find the last page to include in the cut. Walk from whichever end
         * of the list is closer, halving the search space. */
        if (count <= pool->npages/2) {
                i = 0;
                list_for_each(p, &pool->list) {
                        if (++i == count)
                                break;
                }
        } else {
                i = pool->npages + 1;
                list_for_each_prev(p, &pool->list) {
                        if (--i == count)
                                break;
                }
        }
        /* Cut 'count' number of pages from the pool */
        list_cut_position(pages, &pool->list, p);
        pool->npages -= count;
        count = 0;
out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return count;
}
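
/*
 * Worked example (illustrative): with pool->npages == 100, a request for
 * count == 30 walks 30 nodes forward from the head, while count == 70 walks
 * 31 nodes backward from the tail (i goes 101 -> 70), so at most about half
 * the list is ever traversed.
 */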

/* Put all pages on the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                if (page_count(pages[i]) != 1)
                                        pr_err("Erroneous page count. Leaking pages.\n");
                                __free_page(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        spin_lock_irqsave(&pool->lock, irq_flags);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        if (page_count(pages[i]) != 1)
                                pr_err("Erroneous page count. Leaking pages.\n");
                        list_add_tail(&pages[i]->lru, &pool->list);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (npages)
                ttm_page_pool_free(pool, npages);
}
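
/*
 * Worked example (illustrative): with options.max_size == 16384 and
 * pool->npages == 16400 after the put, the excess is only 16 pages, but at
 * least NUM_PAGES_TO_ALLOC pages are freed in one go to amortize the cost of
 * switching pages back to write-back caching.
 */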

/*
 * On success the pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct list_head plist;
        struct page *p = NULL;
        gfp_t gfp_flags = GFP_USER;
        unsigned count;
        int r;

        /* set zero flag for page allocation if required */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* No pool for cached pages */
        if (pool == NULL) {
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= GFP_HIGHUSER;

                for (r = 0; r < npages; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
                                pr_err("Unable to allocate page\n");
                                return -ENOMEM;
                        }

                        pages[r] = p;
                }
                return 0;
        }

        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
        INIT_LIST_HEAD(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        list_for_each_entry(p, &plist, lru) {
                pages[count++] = p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, &plist, lru) {
                        clear_page(page_address(p));
                }
        }

        /* If pool didn't have enough pages allocate new ones. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                INIT_LIST_HEAD(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
                list_for_each_entry(p, &plist, lru) {
                        pages[count++] = p;
                }
                if (r) {
                        /* If there are any pages on the list put them back to
                         * the pool. */
                        pr_err("Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }

        return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
{
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
        int ret;

        WARN_ON(_manager);

        pr_info("Initializing pool allocator\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
        if (!_manager)
                return -ENOMEM;

        ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

        ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "wc dma");

        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
                                   &glob->kobj, "pool");
        if (unlikely(ret != 0)) {
                kobject_put(&_manager->kobj);
                _manager = NULL;
                return ret;
        }

        ttm_pool_mm_shrink_init(_manager);

        return 0;
}
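
/*
 * Note (assumption based on this kernel era, not stated in this file):
 * ttm_page_alloc_init() and ttm_page_alloc_fini() are meant to be called
 * once globally by the TTM memory accounting core, not once per device.
 */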

void ttm_page_alloc_fini(void)
{
        int i;

        pr_info("Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        kobject_put(&_manager->kobj);
        _manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
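
/*
 * Illustrative driver hook (hypothetical driver "foo", sketch only): the
 * exported helpers are intended to back a driver's ttm_tt populate
 * callbacks, e.g.:
 *
 *      static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *      {
 *              return ttm_pool_populate(ttm);
 *      }
 *
 *      static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *      {
 *              ttm_pool_unpopulate(ttm);
 *      }
 */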

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};

        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                /* nrefills/nfrees are unsigned long and npages unsigned,
                 * so use unsigned format specifiers. */
                seq_printf(m, "%6s %12lu %13lu %8u\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
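
/*
 * Example debugfs output (illustrative values only):
 *
 *        pool      refills   pages freed     size
 *          wc           10          1024      512
 *          uc            4           256      128
 *      wc dma            0             0        0
 *      uc dma            0             0        0
 */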