/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, shown in the debugfs output.
 * @nfrees: Statistics counter for pages freed from this pool.
 * @nrefills: Statistics counter for pool refill operations.
 * @order: Allocation order (2^order pages per entry) handled by this pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
	unsigned int		order;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 6

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code so it doesn't need locking.
 *
 * @kobj: sysfs kobject under which the pool limits are exposed.
 * @mm_shrink: Shrinker used by the mm to reclaim pages from the pools.
 * @options: Tunable limits, see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
			struct ttm_page_pool	wc_pool_huge;
			struct ttm_page_pool	uc_pool_huge;
		};
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

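/*
 * Note: the three pool_* attributes above are exposed below the memory
 * global kobject (kobject_init_and_add() in ttm_page_alloc_init() attaches
 * them under its "pool" child), so the exact sysfs path depends on where
 * the driver registers the memory global object. Values are written and
 * read in KiB; ttm_pool_store()/ttm_pool_show() convert to and from pages
 * using PAGE_SIZE >> 10.
 */
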
static struct ttm_pool_manager *_manager;

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
					  enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32) {
		if (huge)
			return NULL;
		pool_index |= 0x2;

	} else if (huge) {
		pool_index |= 0x4;
	}

	return &_manager->pools[pool_index];
}

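/*
 * The pool_index computed above maps straight onto the pools[] union in
 * struct ttm_pool_manager: bit 0 selects uc over wc, bit 1 the DMA32
 * variant and bit 2 the huge variant, yielding indices 0-5 for wc, uc,
 * wc dma32, uc dma32, wc huge and uc huge respectively. Cached pages and
 * huge DMA32 requests have no pool and return NULL.
 */
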
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages,
		unsigned int order)
{
	unsigned int i, pages_nr = (1 << order);

	if (order == 0) {
		if (ttm_set_pages_array_wb(pages, npages))
			pr_err("Failed to set %d pages to wb!\n", npages);
	}

	for (i = 0; i < npages; ++i) {
		if (order > 0) {
			if (ttm_set_pages_wb(pages[i], pages_nr))
				pr_err("Failed to set %d pages to wb!\n", pages_nr);
		}
		__free_pages(pages[i], order);
	}
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES will free all pages in pool
 * @use_static: Safe to use static buffer
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);
	if (!pages_to_free) {
		pr_debug("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages, pool->order);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages, pool->order);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}

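/*
 * Note that ttm_page_pool_free() returns the number of requested pages it
 * could NOT free. The shrinker below relies on this to compute how many
 * pool entries were actually reclaimed in each pass.
 */
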
/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int nr_free_pool;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		unsigned page_nr;

		if (shrink_pages == 0)
			break;

		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		page_nr = (1 << pool->order);
		/* OK to use static buffer since global mutex is held. */
		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
		freed += (nr_free_pool - shrink_pages) << pool->order;
		if (freed >= sc->nr_to_scan)
			break;
		shrink_pages <<= pool->order;
	}
	mutex_unlock(&lock);
	return freed;
}

static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;
	struct ttm_page_pool *pool;

	for (i = 0; i < NUM_POOLS; ++i) {
		pool = &_manager->pools[i];
		count += (pool->npages << pool->order);
	}

	return count;
}

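/*
 * Both shrinker callbacks account in base (order 0) pages: the count
 * callback scales each pool's entry count by its order, and the scan
 * callback rounds the requested number of base pages up to whole pool
 * entries before calling ttm_page_pool_free().
 */
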
static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

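/*
 * ttm_set_pages_array_uc()/_wc() come from ttm_set_memory.h, which wraps
 * the x86 set_memory_*() interface for changing the caching attributes of
 * the kernel mapping; other architectures provide fallbacks there (this is
 * an assumption about the header, not something visible in this file).
 */
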
/**
 * Free the pages that failed to change the caching state. If there are any
 * pages that have already changed their caching state, put them back into
 * the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
			       int ttm_flags, enum ttm_caching_state cstate,
			       unsigned count, unsigned order)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, j, cpages;
	unsigned npages = 1 << order;
	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_pages(gfp_flags, order);

		if (!p) {
			pr_debug("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

		list_add(&p->lru, pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;

#endif
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p++;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

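/*
 * ttm_alloc_new_pages() batches the caching transitions: freshly allocated
 * pages are collected in caching_array and converted at most
 * NUM_PAGES_TO_ALLOC at a time, since changing the caching attributes of
 * pages is expensive. Highmem pages are skipped because they have no
 * permanent kernel mapping whose attributes would need changing.
 */
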
/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
				      enum ttm_caching_state cstate,
				      unsigned count, unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;

	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
	    && count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size, 0);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_debug("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

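/*
 * pool->fill_lock is a simple flag rather than a real lock: it is only
 * tested and set while pool->lock is held, so concurrent allocators that
 * find it set simply skip the refill and fall back to allocating outside
 * of the pool.
 */
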
/**
 * Allocate pages from the pool and put them on the return list.
 *
 * @return zero for success or negative error code.
 */
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
				   struct list_head *pages,
				   int ttm_flags,
				   enum ttm_caching_state cstate,
				   unsigned count, unsigned order)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;
	int r = 0;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (!order)
		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
					  &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* clear the pages coming from the pool if requested */
	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			if (PageHighMem(page))
				clear_highpage(page);
			else
				clear_page(page_address(page));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (count) {
		gfp_t gfp_flags = pool->gfp_flags;

		/* set zero flag for page allocation if required */
		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
			gfp_flags |= __GFP_RETRY_MAYFAIL;

		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
					count, order);
	}

	return r;
}

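/*
 * Pages handed out from the pool may contain stale data, so
 * TTM_PAGE_FLAG_ZERO_ALLOC is honoured by clearing them manually above,
 * while pages allocated fresh from the page allocator are zeroed by the
 * allocator itself via __GFP_ZERO.
 */
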
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	unsigned long irq_flags;
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		i = 0;
		while (i < npages) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			struct page *p = pages[i];
#endif
			unsigned order = 0, j;

			if (!pages[i]) {
				++i;
				continue;
			}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			if (!(flags & TTM_PAGE_FLAG_DMA32) &&
			    (npages - i) >= HPAGE_PMD_NR) {
				for (j = 1; j < HPAGE_PMD_NR; ++j)
					if (++p != pages[i + j])
						break;

				if (j == HPAGE_PMD_NR)
					order = HPAGE_PMD_ORDER;
			}
#endif

			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			__free_pages(pages[i], order);

			j = 1 << order;
			while (j) {
				pages[i++] = NULL;
				--j;
			}
		}
		return;
	}

	i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge) {
		unsigned max_size, n2free;

		spin_lock_irqsave(&huge->lock, irq_flags);
		while ((npages - i) >= HPAGE_PMD_NR) {
			struct page *p = pages[i];
			unsigned j;

			if (!p)
				break;

			for (j = 1; j < HPAGE_PMD_NR; ++j)
				if (++p != pages[i + j])
					break;

			if (j != HPAGE_PMD_NR)
				break;

			list_add_tail(&pages[i]->lru, &huge->list);

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = NULL;
			huge->npages++;
		}

		/* Check that we don't go over the pool limit */
		max_size = _manager->options.max_size;
		max_size /= HPAGE_PMD_NR;
		if (huge->npages > max_size)
			n2free = huge->npages - max_size;
		else
			n2free = 0;
		spin_unlock_irqrestore(&huge->lock, irq_flags);
		if (n2free)
			ttm_page_pool_free(huge, n2free, false);
	}
#endif

	spin_lock_irqsave(&pool->lock, irq_flags);
	while (i < npages) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
		++i;
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}

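/*
 * The huge-page detection above relies on struct page pointer arithmetic:
 * entries whose struct page addresses are consecutive are taken to be
 * physically consecutive page frames, so HPAGE_PMD_NR such entries can be
 * freed or pooled as a single huge page.
 */
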
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	struct list_head plist;
	struct page *p = NULL;
	unsigned count, first;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		gfp_t gfp_flags = GFP_USER;
		unsigned i;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned j;
#endif

		/* set zero flag for page allocation if required */
		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (flags & TTM_PAGE_FLAG_NO_RETRY)
			gfp_flags |= __GFP_RETRY_MAYFAIL;

		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (!(gfp_flags & GFP_DMA32)) {
			while (npages >= HPAGE_PMD_NR) {
				gfp_t huge_flags = gfp_flags;

				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
					__GFP_KSWAPD_RECLAIM;
				huge_flags &= ~__GFP_MOVABLE;
				huge_flags &= ~__GFP_COMP;
				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
				if (!p)
					break;

				for (j = 0; j < HPAGE_PMD_NR; ++j)
					pages[i++] = p++;

				npages -= HPAGE_PMD_NR;
			}
		}
#endif

		first = i;
		while (npages) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_debug("Unable to allocate page\n");
				return -ENOMEM;
			}

			/* Swap the pages if we detect consecutive order */
			if (i > first && pages[i - 1] == p - 1)
				swap(p, pages[i - 1]);

			pages[i++] = p;
			--npages;
		}
		return 0;
	}

	count = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge && npages >= HPAGE_PMD_NR) {
		INIT_LIST_HEAD(&plist);
		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
					npages / HPAGE_PMD_NR,
					HPAGE_PMD_ORDER);

		list_for_each_entry(p, &plist, lru) {
			unsigned j;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[count++] = &p[j];
		}
	}
#endif

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
				    npages - count, 0);

	first = count;
	list_for_each_entry(p, &plist, lru) {
		struct page *tmp = p;

		/* Swap the pages if we detect consecutive order */
		if (count > first && pages[count - 1] == tmp - 1)
			swap(tmp, pages[count - 1]);
		pages[count++] = tmp;
	}

	if (r) {
		/* If there are any pages in the list, put them back to
		 * the pool.
		 */
		pr_debug("Failed to allocate extra pages for large request\n");
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}

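/*
 * In the pool-less (cached) path above, the gfp mask is built up from
 * GFP_USER: __GFP_ZERO and __GFP_RETRY_MAYFAIL are added on demand via the
 * TTM page flags, and either GFP_DMA32 or GFP_HIGHUSER is picked depending
 * on whether the device requires 32-bit addressable memory.
 */
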
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
				      char *name, unsigned int order)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
	pool->order = order;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	unsigned order = HPAGE_PMD_ORDER;
#else
	unsigned order = 0;
#endif

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "wc huge", order);

	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "uc huge", order);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

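/*
 * Note the gfp masks for the huge pools: GFP_TRANSHUGE_LIGHT with
 * __GFP_NORETRY and __GFP_KSWAPD_RECLAIM asks for huge pages without
 * stalling the allocation, while __GFP_MOVABLE and __GFP_COMP are masked
 * out, presumably because these pages are pinned for device use and the
 * code manages the individual struct pages of a huge allocation itself.
 */
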
void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	/* OK to use static buffer since global mutex is no longer used. */
	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned i;

	if (mem_count_update == 0)
		goto put_pages;

	for (i = 0; i < mem_count_update; ++i) {
		if (!ttm->pages[i])
			continue;

		ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
	}

put_pages:
	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state);
	ttm->state = tt_unpopulated;
}

int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
		return -ENOMEM;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		ttm_pool_unpopulate_helper(ttm, 0);
		return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate_helper(ttm, i);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

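/*
 * ttm_populate_and_map_pages() below looks for runs of physically
 * consecutive pages so that a whole run can be handed to dma_map_page() as
 * a single mapping; the remaining dma_address slots of the run are then
 * filled in by offsetting from the first address.
 */
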
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
			       struct ttm_operation_ctx *ctx)
{
	unsigned i, j;
	int r;

	r = ttm_pool_populate(&tt->ttm, ctx);
	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; ++i) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
						  0, num_pages * PAGE_SIZE,
						  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}

		for (j = 1; j < num_pages; ++j) {
			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
			++i;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);

void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;

	for (i = 0; i < tt->ttm.num_pages;) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
			++i;
			continue;
		}

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		i += num_pages;
	}
	ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%7s %12s %13s %8s\n",
		   h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%7s %12ld %13ld %8d\n",
			   p->name, p->nrefills,
			   p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);