Commit | Line | Data |
---|---|---|
1403b1a3 PN |
1 | /* |
2 | * Copyright (c) Red Hat Inc. | |
3 | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the | |
12 | * next paragraph) shall be included in all copies or substantial portions | |
13 | * of the Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: Dave Airlie <airlied@redhat.com> | |
24 | * Jerome Glisse <jglisse@redhat.com> | |
25 | * Pauli Nieminen <suokkos@gmail.com> | |
26 | */ | |
27 | ||
28 | /* simple list based uncached page pool | |
29 | * - Pool collects resently freed pages for reuse | |
30 | * - Use page->lru to keep a free list | |
31 | * - doesn't track currently in use pages | |
32 | */ | |
25d0479a JP |
33 | |
34 | #define pr_fmt(fmt) "[TTM] " fmt | |
35 | ||
1403b1a3 PN |
36 | #include <linux/list.h> |
37 | #include <linux/spinlock.h> | |
38 | #include <linux/highmem.h> | |
39 | #include <linux/mm_types.h> | |
07458661 | 40 | #include <linux/module.h> |
1403b1a3 | 41 | #include <linux/mm.h> |
4cdc840a | 42 | #include <linux/seq_file.h> /* for seq_printf */ |
2125b8a4 | 43 | #include <linux/slab.h> |
f9820a46 | 44 | #include <linux/dma-mapping.h> |
1403b1a3 | 45 | |
60063497 | 46 | #include <linux/atomic.h> |
1403b1a3 | 47 | |
760285e7 DH |
48 | #include <drm/ttm/ttm_bo_driver.h> |
49 | #include <drm/ttm/ttm_page_alloc.h> | |
1403b1a3 | 50 | |
e6bf6e57 | 51 | #if IS_ENABLED(CONFIG_AGP) |
d6678651 LT |
52 | #include <asm/agp.h> |
53 | #endif | |
ed3ba079 LA |
54 | #ifdef CONFIG_X86 |
55 | #include <asm/set_memory.h> | |
56 | #endif | |
1403b1a3 PN |
57 | |
58 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) | |
59 | #define SMALL_ALLOCATION 16 | |
60 | #define FREE_ALL_PAGES (~0U) | |
61 | /* times are in msecs */ | |
62 | #define PAGE_FREE_INTERVAL 1000 | |
63 | ||
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Human readable pool name, used in debug output.
 * @nfrees: Statistics counter: total pages freed from this pool.
 * @nrefills: Statistics counter: number of times the pool was refilled.
 * @order: Allocation order of the pages held here (0 = single pages).
 */
struct ttm_page_pool {
	spinlock_t lock;
	bool fill_lock;
	struct list_head list;
	gfp_t gfp_flags;
	unsigned npages;
	char *name;
	unsigned long nfrees;
	unsigned long nrefills;
	unsigned int order;
};
86 | ||
c96af79e PN |
/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned alloc_size;	/* pages allocated per pool refill */
	unsigned max_size;	/* max pages kept per pool before trimming */
	unsigned small;		/* requests below this count refill the pool */
};

/* wc/uc x {normal, dma32, huge} page pools */
#define NUM_POOLS 6
1403b1a3 PN |
100 | |
/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is read only object for pool code so it doesn't need locking.
 *
 * @kobj: kobject exposing the pool options under sysfs.
 * @mm_shrink: shrinker registered with the mm so pooled pages can be
 * reclaimed under memory pressure.
 * @options: tunable pool limits, adjustable via sysfs.
 * @pools: All pool objects in use; the union lets them be addressed either
 * by index or by descriptive name.
 **/
struct ttm_pool_manager {
	struct kobject kobj;
	struct shrinker mm_shrink;
	struct ttm_pool_opts options;

	union {
		struct ttm_page_pool pools[NUM_POOLS];
		struct {
			struct ttm_page_pool wc_pool;
			struct ttm_page_pool uc_pool;
			struct ttm_page_pool wc_pool_dma32;
			struct ttm_page_pool uc_pool_dma32;
			struct ttm_page_pool wc_pool_huge;
			struct ttm_page_pool uc_pool_huge;
		} ;
	};
};
131 | ||
c96af79e PN |
/*
 * sysfs attributes exposing the pool limits. Values are read and written
 * in kilobytes; ttm_pool_store()/ttm_pool_show() convert to/from pages.
 */
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
151 | ||
/* kobject release callback: frees the manager once its last ref is dropped. */
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}
158 | ||
159 | static ssize_t ttm_pool_store(struct kobject *kobj, | |
160 | struct attribute *attr, const char *buffer, size_t size) | |
161 | { | |
162 | struct ttm_pool_manager *m = | |
163 | container_of(kobj, struct ttm_pool_manager, kobj); | |
164 | int chars; | |
165 | unsigned val; | |
166 | chars = sscanf(buffer, "%u", &val); | |
167 | if (chars == 0) | |
168 | return size; | |
169 | ||
170 | /* Convert kb to number of pages */ | |
171 | val = val / (PAGE_SIZE >> 10); | |
172 | ||
173 | if (attr == &ttm_page_pool_max) | |
174 | m->options.max_size = val; | |
175 | else if (attr == &ttm_page_pool_small) | |
176 | m->options.small = val; | |
177 | else if (attr == &ttm_page_pool_alloc_size) { | |
178 | if (val > NUM_PAGES_TO_ALLOC*8) { | |
25d0479a | 179 | pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", |
4abe4389 TH |
180 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), |
181 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | |
c96af79e PN |
182 | return size; |
183 | } else if (val > NUM_PAGES_TO_ALLOC) { | |
25d0479a JP |
184 | pr_warn("Setting allocation size to larger than %lu is not recommended\n", |
185 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | |
c96af79e PN |
186 | } |
187 | m->options.alloc_size = val; | |
188 | } | |
189 | ||
190 | return size; | |
191 | } | |
192 | ||
193 | static ssize_t ttm_pool_show(struct kobject *kobj, | |
194 | struct attribute *attr, char *buffer) | |
195 | { | |
196 | struct ttm_pool_manager *m = | |
197 | container_of(kobj, struct ttm_pool_manager, kobj); | |
198 | unsigned val = 0; | |
199 | ||
200 | if (attr == &ttm_page_pool_max) | |
201 | val = m->options.max_size; | |
202 | else if (attr == &ttm_page_pool_small) | |
203 | val = m->options.small; | |
204 | else if (attr == &ttm_page_pool_alloc_size) | |
205 | val = m->options.alloc_size; | |
206 | ||
207 | val = val * (PAGE_SIZE >> 10); | |
208 | ||
209 | return snprintf(buffer, PAGE_SIZE, "%u\n", val); | |
210 | } | |
211 | ||
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

/* kobject type backing the "pool_*" sysfs tunables */
static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

/* Singleton pool manager instance; released through its kobject. */
static struct ttm_pool_manager *_manager;
1403b1a3 | 224 | |
975efdb1 | 225 | #ifndef CONFIG_X86 |
154683dd RH |
/*
 * Fallback for !CONFIG_X86: restore write-back caching on a contiguous
 * range of pages. Without AGP there is no attribute to undo, so this
 * degenerates to a no-op that always succeeds.
 */
static int set_pages_wb(struct page *page, int numpages)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < numpages; i++)
		unmap_page_from_agp(page++);
#endif
	return 0;
}
236 | ||
1403b1a3 PN |
237 | static int set_pages_array_wb(struct page **pages, int addrinarray) |
238 | { | |
e6bf6e57 | 239 | #if IS_ENABLED(CONFIG_AGP) |
1403b1a3 PN |
240 | int i; |
241 | ||
242 | for (i = 0; i < addrinarray; i++) | |
243 | unmap_page_from_agp(pages[i]); | |
244 | #endif | |
245 | return 0; | |
246 | } | |
247 | ||
248 | static int set_pages_array_wc(struct page **pages, int addrinarray) | |
249 | { | |
e6bf6e57 | 250 | #if IS_ENABLED(CONFIG_AGP) |
1403b1a3 PN |
251 | int i; |
252 | ||
253 | for (i = 0; i < addrinarray; i++) | |
254 | map_page_into_agp(pages[i]); | |
255 | #endif | |
256 | return 0; | |
257 | } | |
258 | ||
259 | static int set_pages_array_uc(struct page **pages, int addrinarray) | |
260 | { | |
e6bf6e57 | 261 | #if IS_ENABLED(CONFIG_AGP) |
1403b1a3 PN |
262 | int i; |
263 | ||
264 | for (i = 0; i < addrinarray; i++) | |
265 | map_page_into_agp(pages[i]); | |
266 | #endif | |
267 | return 0; | |
268 | } | |
269 | #endif | |
270 | ||
271 | /** | |
272 | * Select the right pool or requested caching state and ttm flags. */ | |
6ed4e2e6 CK |
273 | static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, |
274 | enum ttm_caching_state cstate) | |
1403b1a3 PN |
275 | { |
276 | int pool_index; | |
277 | ||
278 | if (cstate == tt_cached) | |
279 | return NULL; | |
280 | ||
281 | if (cstate == tt_wc) | |
282 | pool_index = 0x0; | |
283 | else | |
284 | pool_index = 0x1; | |
285 | ||
6ed4e2e6 CK |
286 | if (flags & TTM_PAGE_FLAG_DMA32) { |
287 | if (huge) | |
288 | return NULL; | |
1403b1a3 PN |
289 | pool_index |= 0x2; |
290 | ||
6ed4e2e6 CK |
291 | } else if (huge) { |
292 | pool_index |= 0x4; | |
293 | } | |
294 | ||
5870a4d9 | 295 | return &_manager->pools[pool_index]; |
1403b1a3 PN |
296 | } |
297 | ||
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages,
		unsigned int order)
{
	unsigned int i, pages_nr = (1 << order);

	/* order 0: restore the caching attribute for the whole array at once */
	if (order == 0) {
		if (set_pages_array_wb(pages, npages))
			pr_err("Failed to set %d pages to wb!\n", npages);
	}

	for (i = 0; i < npages; ++i) {
		/* higher orders: restore each compound allocation separately */
		if (order > 0) {
			if (set_pages_wb(pages[i], pages_nr))
				pr_err("Failed to set %d pages to wb!\n", pages_nr);
		}
		__free_pages(pages[i], order);
	}
}
317 | ||
/* Accounting after pages leave the pool; caller must hold pool->lock. */
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}
324 | ||
/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pool entries to free; FREE_ALL_PAGES frees everything
 * @use_static: Safe to use static buffer (only when the caller holds the
 * global shrinker mutex)
 *
 * Returns the number of requested entries that could not be freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);
	if (!pages_to_free) {
		pr_debug("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* walk from the tail so the oldest pages are freed first */
	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages, pool->order);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages, pool->order);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
420 | ||
/**
 * Callback for mm to request pool to reduce number of page held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;	/* rotates so all pools get shrunk */
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int nr_free_pool;

	/* don't block reclaim; let another shrink run finish instead */
	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		unsigned page_nr;

		if (shrink_pages == 0)
			break;

		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		page_nr = (1 << pool->order);
		/* OK to use static buffer since global mutex is held. */
		/* convert single-page counts to pool entries of this order */
		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
		freed += (nr_free_pool - shrink_pages) << pool->order;
		if (freed >= sc->nr_to_scan)
			break;
		shrink_pages <<= pool->order;
	}
	mutex_unlock(&lock);
	return freed;
}
464 | ||
465 | ||
466 | static unsigned long | |
467 | ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |
468 | { | |
469 | unsigned i; | |
470 | unsigned long count = 0; | |
750a2503 | 471 | struct ttm_page_pool *pool; |
7dc19d5a | 472 | |
750a2503 RH |
473 | for (i = 0; i < NUM_POOLS; ++i) { |
474 | pool = &_manager->pools[i]; | |
475 | count += (pool->npages << pool->order); | |
476 | } | |
7dc19d5a DC |
477 | |
478 | return count; | |
1403b1a3 PN |
479 | } |
480 | ||
e2721595 | 481 | static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) |
1403b1a3 | 482 | { |
7dc19d5a DC |
483 | manager->mm_shrink.count_objects = ttm_pool_shrink_count; |
484 | manager->mm_shrink.scan_objects = ttm_pool_shrink_scan; | |
1403b1a3 | 485 | manager->mm_shrink.seeks = 1; |
e2721595 | 486 | return register_shrinker(&manager->mm_shrink); |
1403b1a3 PN |
487 | } |
488 | ||
/* Unregister the pool shrinker; counterpart of ttm_pool_mm_shrink_init(). */
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
493 | ||
494 | static int ttm_set_pages_caching(struct page **pages, | |
495 | enum ttm_caching_state cstate, unsigned cpages) | |
496 | { | |
497 | int r = 0; | |
498 | /* Set page caching */ | |
499 | switch (cstate) { | |
500 | case tt_uncached: | |
501 | r = set_pages_array_uc(pages, cpages); | |
502 | if (r) | |
25d0479a | 503 | pr_err("Failed to set %d pages to uc!\n", cpages); |
1403b1a3 PN |
504 | break; |
505 | case tt_wc: | |
506 | r = set_pages_array_wc(pages, cpages); | |
507 | if (r) | |
25d0479a | 508 | pr_err("Failed to set %d pages to wc!\n", cpages); |
1403b1a3 PN |
509 | break; |
510 | default: | |
511 | break; | |
512 | } | |
513 | return r; | |
514 | } | |
515 | ||
516 | /** | |
517 | * Free pages the pages that failed to change the caching state. If there is | |
518 | * any pages that have changed their caching state already put them to the | |
519 | * pool. | |
520 | */ | |
521 | static void ttm_handle_caching_state_failure(struct list_head *pages, | |
522 | int ttm_flags, enum ttm_caching_state cstate, | |
523 | struct page **failed_pages, unsigned cpages) | |
524 | { | |
525 | unsigned i; | |
4abe4389 | 526 | /* Failed pages have to be freed */ |
1403b1a3 PN |
527 | for (i = 0; i < cpages; ++i) { |
528 | list_del(&failed_pages[i]->lru); | |
529 | __free_page(failed_pages[i]); | |
530 | } | |
531 | } | |
532 | ||
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 *
 * @pages: list that receives the newly allocated pages
 * @gfp_flags: allocation flags passed to alloc_pages()
 * @ttm_flags: TTM_PAGE_FLAG_* of the request (forwarded on failure handling)
 * @cstate: caching state to apply to the new pages
 * @count: number of allocations of the given @order to perform
 * @order: allocation order for each entry (0 = single pages)
 *
 * Returns 0 on success or a negative error code.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
			       int ttm_flags, enum ttm_caching_state cstate,
			       unsigned count, unsigned order)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, j, cpages;
	unsigned npages = 1 << order;
	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_pages(gfp_flags, order);

		if (!p) {
			pr_debug("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

		list_add(&p->lru, pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * we should be fine in such case
		 */
		if (PageHighMem(p))
			continue;

#endif
		/* queue each constituent page for the caching change,
		 * flushing the batch whenever the array fills up */
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p++;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	/* flush the remaining partial batch */
	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
618 | ||
/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 *
 * Called with pool->lock held; the lock is dropped and re-acquired around
 * the actual allocation because changing page caching may sleep.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
				      enum ttm_caching_state cstate,
				      unsigned count, unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size, 0);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_debug("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			/* count the partial allocation before splicing it in */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
675 | ||
/**
 * Allocate pages from the pool and put them on the return list.
 *
 * Takes up to @count entries of the given @order from @pool; whatever the
 * pool cannot supply is allocated fresh via ttm_alloc_new_pages().
 *
 * @return zero for success or negative error code.
 */
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
				   struct list_head *pages,
				   int ttm_flags,
				   enum ttm_caching_state cstate,
				   unsigned count, unsigned order)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;
	int r = 0;

	spin_lock_irqsave(&pool->lock, irq_flags);
	/* only the single-page pools get refilled on demand */
	if (!order)
		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
					  &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* find the last pages to include for requested number of pages. Split
	 * pool to begin and halve it to reduce search space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		/* closer to the tail: walk backwards instead */
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* clear the pages coming from the pool if requested */
	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			if (PageHighMem(page))
				clear_highpage(page);
			else
				clear_page(page_address(page));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (count) {
		gfp_t gfp_flags = pool->gfp_flags;

		/* set zero flag for page allocation if required */
		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
			gfp_flags |= __GFP_RETRY_MAYFAIL;

		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
					count, order);
	}

	return r;
}
758 | ||
8e7e7052 JG |
759 | /* Put all pages in pages list to correct pool to wait for reuse */ |
760 | static void ttm_put_pages(struct page **pages, unsigned npages, int flags, | |
761 | enum ttm_caching_state cstate) | |
762 | { | |
6ed4e2e6 | 763 | struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); |
7d0a4282 | 764 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
6ed4e2e6 | 765 | struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); |
7d0a4282 | 766 | #endif |
8e7e7052 | 767 | unsigned long irq_flags; |
8e7e7052 JG |
768 | unsigned i; |
769 | ||
770 | if (pool == NULL) { | |
771 | /* No pool for this memory type so free the pages */ | |
0284f1ea CK |
772 | i = 0; |
773 | while (i < npages) { | |
5c42c64f CK |
774 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
775 | struct page *p = pages[i]; | |
776 | #endif | |
777 | unsigned order = 0, j; | |
0284f1ea CK |
778 | |
779 | if (!pages[i]) { | |
780 | ++i; | |
781 | continue; | |
782 | } | |
783 | ||
5c42c64f | 784 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
33d22c2e DA |
785 | if (!(flags & TTM_PAGE_FLAG_DMA32)) { |
786 | for (j = 0; j < HPAGE_PMD_NR; ++j) | |
787 | if (p++ != pages[i + j]) | |
788 | break; | |
5c42c64f | 789 | |
33d22c2e DA |
790 | if (j == HPAGE_PMD_NR) |
791 | order = HPAGE_PMD_ORDER; | |
792 | } | |
5c42c64f CK |
793 | #endif |
794 | ||
0284f1ea CK |
795 | if (page_count(pages[i]) != 1) |
796 | pr_err("Erroneous page count. Leaking pages.\n"); | |
0284f1ea CK |
797 | __free_pages(pages[i], order); |
798 | ||
5c42c64f CK |
799 | j = 1 << order; |
800 | while (j) { | |
0284f1ea | 801 | pages[i++] = NULL; |
5c42c64f | 802 | --j; |
8e7e7052 JG |
803 | } |
804 | } | |
805 | return; | |
806 | } | |
807 | ||
6ed4e2e6 CK |
808 | i = 0; |
809 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
810 | if (huge) { | |
811 | unsigned max_size, n2free; | |
812 | ||
813 | spin_lock_irqsave(&huge->lock, irq_flags); | |
814 | while (i < npages) { | |
815 | struct page *p = pages[i]; | |
816 | unsigned j; | |
817 | ||
818 | if (!p) | |
819 | break; | |
820 | ||
821 | for (j = 0; j < HPAGE_PMD_NR; ++j) | |
822 | if (p++ != pages[i + j]) | |
823 | break; | |
824 | ||
825 | if (j != HPAGE_PMD_NR) | |
826 | break; | |
827 | ||
828 | list_add_tail(&pages[i]->lru, &huge->list); | |
829 | ||
830 | for (j = 0; j < HPAGE_PMD_NR; ++j) | |
831 | pages[i++] = NULL; | |
832 | huge->npages++; | |
833 | } | |
834 | ||
835 | /* Check that we don't go over the pool limit */ | |
836 | max_size = _manager->options.max_size; | |
837 | max_size /= HPAGE_PMD_NR; | |
838 | if (huge->npages > max_size) | |
839 | n2free = huge->npages - max_size; | |
840 | else | |
841 | n2free = 0; | |
842 | spin_unlock_irqrestore(&huge->lock, irq_flags); | |
843 | if (n2free) | |
844 | ttm_page_pool_free(huge, n2free, false); | |
845 | } | |
846 | #endif | |
847 | ||
8e7e7052 | 848 | spin_lock_irqsave(&pool->lock, irq_flags); |
6ed4e2e6 | 849 | while (i < npages) { |
8e7e7052 JG |
850 | if (pages[i]) { |
851 | if (page_count(pages[i]) != 1) | |
25d0479a | 852 | pr_err("Erroneous page count. Leaking pages.\n"); |
8e7e7052 JG |
853 | list_add_tail(&pages[i]->lru, &pool->list); |
854 | pages[i] = NULL; | |
855 | pool->npages++; | |
856 | } | |
6ed4e2e6 | 857 | ++i; |
8e7e7052 JG |
858 | } |
859 | /* Check that we don't go over the pool limit */ | |
860 | npages = 0; | |
861 | if (pool->npages > _manager->options.max_size) { | |
862 | npages = pool->npages - _manager->options.max_size; | |
863 | /* free at least NUM_PAGES_TO_ALLOC number of pages | |
864 | * to reduce calls to set_memory_wb */ | |
865 | if (npages < NUM_PAGES_TO_ALLOC) | |
866 | npages = NUM_PAGES_TO_ALLOC; | |
867 | } | |
868 | spin_unlock_irqrestore(&pool->lock, irq_flags); | |
869 | if (npages) | |
881fdaa5 | 870 | ttm_page_pool_free(pool, npages, false); |
8e7e7052 JG |
871 | } |
872 | ||
/*
 * ttm_get_pages - fill @pages with @npages pages matching @flags/@cstate.
 *
 * Cached requests have no pool and are allocated straight from the page
 * allocator; write-combined/uncached requests are served from the matching
 * pool (huge pool first when available).  On success @pages holds npages
 * correctly cached pages.  On failure, pages already placed in @pages by
 * the direct path are NOT freed here - the caller is expected to release
 * them (ttm_pool_populate() does so via ttm_pool_unpopulate_helper()).
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	struct list_head plist;
	struct page *p = NULL;
	unsigned count, first;
	int r;

	/* No pool for cached pages - allocate directly. */
	if (pool == NULL) {
		gfp_t gfp_flags = GFP_USER;
		unsigned i;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned j;
#endif

		/* set zero flag for page allocation if required */
		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		/* Caller prefers failure over retrying indefinitely. */
		if (flags & TTM_PAGE_FLAG_NO_RETRY)
			gfp_flags |= __GFP_RETRY_MAYFAIL;

		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/*
		 * Opportunistically satisfy the bulk of the request with
		 * huge pages; any failure just falls back to 4K pages.
		 */
		if (!(gfp_flags & GFP_DMA32)) {
			while (npages >= HPAGE_PMD_NR) {
				gfp_t huge_flags = gfp_flags;

				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
					__GFP_KSWAPD_RECLAIM;
				huge_flags &= ~__GFP_MOVABLE;
				huge_flags &= ~__GFP_COMP;
				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
				if (!p)
					break;

				/* Expose the huge page as its 4K subpages. */
				for (j = 0; j < HPAGE_PMD_NR; ++j)
					pages[i++] = p++;

				npages -= HPAGE_PMD_NR;
			}
		}
#endif

		first = i;
		while (npages) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_debug("Unable to allocate page\n");
				/* Already-filled slots are left for the caller to put. */
				return -ENOMEM;
			}

			/*
			 * Swap the pages if we detect consecutive order -
			 * keeps pages[] physically ascending so DMA mapping
			 * (ttm_populate_and_map_pages()) can coalesce runs.
			 */
			if (i > first && pages[i - 1] == p - 1)
				swap(p, pages[i - 1]);

			pages[i++] = p;
			--npages;
		}
		return 0;
	}

	count = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/*
	 * Best effort from the huge pool: the return value is deliberately
	 * ignored, anything still missing is taken from the base pool below.
	 */
	if (huge && npages >= HPAGE_PMD_NR) {
		INIT_LIST_HEAD(&plist);
		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
					npages / HPAGE_PMD_NR,
					HPAGE_PMD_ORDER);

		list_for_each_entry(p, &plist, lru) {
			unsigned j;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[count++] = &p[j];
		}
	}
#endif

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
				    npages - count, 0);

	first = count;
	list_for_each_entry(p, &plist, lru) {
		struct page *tmp = p;

		/* Swap the pages if we detect consecutive order */
		if (count > first && pages[count - 1] == tmp - 1)
			swap(tmp, pages[count - 1]);
		pages[count++] = tmp;
	}

	if (r) {
		/* If there is any pages in the list put them back to
		 * the pool.
		 */
		pr_debug("Failed to allocate extra pages for large request\n");
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}
992 | ||
3b9c214a | 993 | static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, |
750a2503 | 994 | char *name, unsigned int order) |
1403b1a3 PN |
995 | { |
996 | spin_lock_init(&pool->lock); | |
997 | pool->fill_lock = false; | |
998 | INIT_LIST_HEAD(&pool->list); | |
07458661 | 999 | pool->npages = pool->nfrees = 0; |
1403b1a3 | 1000 | pool->gfp_flags = flags; |
07458661 | 1001 | pool->name = name; |
750a2503 | 1002 | pool->order = order; |
1403b1a3 PN |
1003 | } |
1004 | ||
/**
 * ttm_page_alloc_init - set up the global TTM page pool manager.
 * @glob: memory accounting global; the manager kobject is parented to it.
 * @max_pages: upper bound on pages kept in a single pool.
 *
 * Allocates the singleton _manager, initializes the wc/uc pools (plain,
 * DMA32 and huge variants), registers the sysfs kobject and the pool
 * shrinker.  Returns 0 on success or a negative errno.
 */
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* Huge pools hand out HPAGE_PMD_ORDER allocations. */
	unsigned order = HPAGE_PMD_ORDER;
#else
	/* Without THP the "huge" pools degrade to order-0 pools. */
	unsigned order = 0;
#endif

	/* A second init would leak the previous manager. */
	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma", 0);

	/*
	 * Huge pool allocations must not be movable or compound and give up
	 * early (__GFP_NORETRY) - same flag recipe as the direct huge page
	 * path in ttm_get_pages().
	 */
	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "wc huge", order);

	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP)
				  , "uc huge", order);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	/*
	 * NOTE(review): assumes ttm_pool_kobj_type's release callback frees
	 * the kzalloc'd _manager - confirm against the type definition.
	 */
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}
1063 | ||
0e57a3cc | 1064 | void ttm_page_alloc_fini(void) |
1403b1a3 PN |
1065 | { |
1066 | int i; | |
1067 | ||
25d0479a | 1068 | pr_info("Finalizing pool allocator\n"); |
5870a4d9 | 1069 | ttm_pool_mm_shrink_fini(_manager); |
1403b1a3 | 1070 | |
881fdaa5 | 1071 | /* OK to use static buffer since global mutex is no longer used. */ |
1403b1a3 | 1072 | for (i = 0; i < NUM_POOLS; ++i) |
881fdaa5 | 1073 | ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); |
c96af79e | 1074 | |
5870a4d9 FJ |
1075 | kobject_put(&_manager->kobj); |
1076 | _manager = NULL; | |
1403b1a3 | 1077 | } |
07458661 | 1078 | |
4d869f25 RH |
1079 | static void |
1080 | ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) | |
1081 | { | |
3231a769 | 1082 | struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; |
4d869f25 RH |
1083 | unsigned i; |
1084 | ||
1085 | if (mem_count_update == 0) | |
1086 | goto put_pages; | |
1087 | ||
1088 | for (i = 0; i < mem_count_update; ++i) { | |
1089 | if (!ttm->pages[i]) | |
1090 | continue; | |
1091 | ||
3231a769 | 1092 | ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); |
4d869f25 RH |
1093 | } |
1094 | ||
1095 | put_pages: | |
1096 | ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, | |
1097 | ttm->caching_state); | |
1098 | ttm->state = tt_unpopulated; | |
1099 | } | |
1100 | ||
/**
 * ttm_pool_populate - allocate and account backing pages for a ttm_tt.
 * @ttm: the ttm_tt whose pages[] array gets filled.
 * @ctx: operation context forwarded to the memory accounting code.
 *
 * No-op when the tt is already populated.  On any failure everything
 * obtained so far is put back, leaving the tt unpopulated again.
 * Returns 0 on success or a negative errno.
 */
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
		return -ENOMEM;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		/* 0: no pages have been accounted yet, just release them. */
		ttm_pool_unpopulate_helper(ttm, 0);
		return ret;
	}

	/* Charge every page against the global memory accounting. */
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (unlikely(ret != 0)) {
			/* Un-account the first i pages, then free all. */
			ttm_pool_unpopulate_helper(ttm, i);
			/* NOTE(review): the actual ret value is discarded. */
			return -ENOMEM;
		}
	}

	/* Pages swapped out earlier must be read back in. */
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
1141 | ||
/**
 * ttm_pool_unpopulate - release all backing pages of a ttm_tt.
 * @ttm: the ttm_tt to strip.
 *
 * All num_pages pages were accounted by ttm_pool_populate(), so that same
 * count is handed to the helper for un-accounting before the pages are
 * returned to the pools.
 */
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
1147 | ||
/**
 * ttm_populate_and_map_pages - populate a ttm_dma_tt and create DMA mappings.
 * @dev: device the mappings are made for.
 * @tt: the ttm_dma_tt to populate and map.
 * @ctx: operation context forwarded to ttm_pool_populate().
 *
 * After populating, runs of physically consecutive pages are mapped with a
 * single dma_map_page() call covering the whole run; dma_address[] for the
 * tail pages of a run is filled in arithmetically.  Returns 0 on success,
 * the populate error, or -EFAULT when a mapping fails (after undoing the
 * mappings made so far).
 */
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
			       struct ttm_operation_ctx *ctx)
{
	unsigned i, j;
	int r;

	r = ttm_pool_populate(&tt->ttm, ctx);
	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; ++i) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		/* Count how many following pages are physically adjacent. */
		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		/* Map the whole run in one go. */
		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
						  0, num_pages * PAGE_SIZE,
						  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			/*
			 * NOTE(review): this unwind unmaps page by page with
			 * PAGE_SIZE, but earlier iterations may have mapped
			 * multi-page runs with one call; the DMA API expects
			 * unmap address/size to match the map call - verify.
			 */
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}

		/* Tail pages of the run share the mapping at fixed offsets. */
		for (j = 1; j < num_pages; ++j) {
			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
			++i;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);
1190 | ||
/**
 * ttm_unmap_and_unpopulate_pages - tear down DMA mappings and free pages.
 * @dev: device the mappings were made for.
 * @tt: the ttm_dma_tt to unmap and unpopulate.
 *
 * Mirrors ttm_populate_and_map_pages(): physically consecutive pages were
 * mapped as a single run, so the same runs are detected again and each is
 * unmapped with one dma_unmap_page() call of the matching size.
 */
void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;

	for (i = 0; i < tt->ttm.num_pages;) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		/* Skip slots that were never mapped or never populated. */
		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
			++i;
			continue;
		}

		/* Re-detect the run of physically adjacent pages. */
		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		i += num_pages;
	}
	ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
1219 | ||
07458661 PN |
1220 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) |
1221 | { | |
1222 | struct ttm_page_pool *p; | |
1223 | unsigned i; | |
1224 | char *h[] = {"pool", "refills", "pages freed", "size"}; | |
5870a4d9 | 1225 | if (!_manager) { |
07458661 PN |
1226 | seq_printf(m, "No pool allocator running.\n"); |
1227 | return 0; | |
1228 | } | |
6ed4e2e6 | 1229 | seq_printf(m, "%7s %12s %13s %8s\n", |
07458661 PN |
1230 | h[0], h[1], h[2], h[3]); |
1231 | for (i = 0; i < NUM_POOLS; ++i) { | |
5870a4d9 | 1232 | p = &_manager->pools[i]; |
07458661 | 1233 | |
6ed4e2e6 | 1234 | seq_printf(m, "%7s %12ld %13ld %8d\n", |
07458661 PN |
1235 | p->name, p->nrefills, |
1236 | p->nfrees, p->npages); | |
1237 | } | |
1238 | return 0; | |
1239 | } | |
1240 | EXPORT_SYMBOL(ttm_page_alloc_debugfs); |