Merge tag 'v5.3-rc2' into drm-misc-fixes
[linux-2.6-block.git] drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
/*
 * Copyright 2011 (c) Oracle Corp.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages
 * - Tracks whether the page is UC, WC or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
#define VADDR_FLAG_HUGE_POOL	1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL

enum pool_type {
	IS_UNDEFINED	= 0,
	IS_WC		= 1 << 1,
	IS_UC		= 1 << 2,
	IS_CACHED	= 1 << 3,
	IS_DMA32	= 1 << 4,
	IS_HUGE		= 1 << 5
};

/*
 * The pool structure. There are up to nine pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 *  - huge (not restricted to DMA32):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page that keeps track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 * huge pool
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

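/*
 * sysfs store handler: values are written in KiB and converted to a page
 * count before being applied to the pool limits.
 */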
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

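/* sysfs show handler: report the selected pool limit, converted back to KiB. */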
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

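/*
 * Apply the pool's caching attribute (uncached or write-combined) to an
 * array of freshly allocated pages.
 */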
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

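/* Return a page to the DMA API and free its accounting structure. */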
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	unsigned long attrs = 0;
	dma_addr_t dma = d_page->dma;
	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);

	kfree(d_page);
	d_page = NULL;
}

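/*
 * Allocate a single page (a huge page for huge pools) through the DMA API
 * and wrap it in a freshly allocated struct dma_page.
 */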
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

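/* Map the TTM page flags and caching state to the matching pool type bits. */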
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;

}

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		if (ttm_set_pages_wb(page, num_pages))
			pr_err("%s: Failed to set %d pages to wb!\n",
			       pool->dev_name, num_pages);
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    ttm_set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 * @use_static: Safe to use static buffer
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}

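/*
 * Tear down the pool of the given type for a device: drop it from the
 * global list, free all of its pages and finally free the pool itself.
 */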
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device has been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * On free-ing of the 'struct device' this destructor is run, although the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

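/*
 * Create a pool of the requested type for a device and hook it up to both
 * the global pool list and the device's dma_pools list.
 */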
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated.*/
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and calls the dev_res destructors: ttm_dma_pool_release. The nice
	 * thing is at that point of time there are no pages associated with the
	 * driver so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}

/*
 * Free the pages that failed to change the caching state. If there
 * are pages that have changed their caching state already, put them to the
 * pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}

}

/*
 * Allocate 'count' pages and put all of them on the 'd_pages' list after
 * setting the requested caching state on them.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * return dma_page pointer if success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}

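/* Compute the GFP flags used for this TTM, optionally for huge page allocations. */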
static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}

/*
 * On success pages list will hold count number of correctly
 * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools)*/
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

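/* Register the pool shrinker with the MM so memory pressure can reclaim pool pages. */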
static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

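/*
 * Module-level init: allocate the global pool manager, expose its limits
 * through sysfs and register the shrinker.
 */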
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

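/* Module-level teardown: unregister the shrinker and free all remaining pools. */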
void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

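/* debugfs helper: dump per-pool statistics for every registered device pool. */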
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);

#endif