// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

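/*
 * Illustrative usage sketch (not part of this file): a driver that needs
 * many small coherent buffers of one size typically creates a pool and
 * carves blocks out of it, roughly as follows.  The device pointer, sizes
 * and names here are hypothetical.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	pool = dma_pool_create("example", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!buf) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *
 *	... hand 'dma' to the device, access the block through 'buf' ...
 *
 *	dma_pool_free(pool, buf, dma);
 *	dma_pool_destroy(pool);
 */
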
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int size;
	struct dma_page *page;
	struct dma_pool *pool;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned int pages = 0;
		size_t blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
				      pool->name, blocks,
				      (size_t) pages *
				      (pool->allocation / pool->size),
				      pool->size, pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

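/*
 * Illustrative sketch (hypothetical sizes): a device whose 32-byte
 * descriptors must never straddle a 4 KiB address boundary only has to pass
 * that boundary here; block placement is then handled by the pool:
 *
 *	pool = dma_pool_create("desc", dev, 32, 32, 4096);
 *
 * Every block later returned by dma_pool_alloc() from this pool then fits
 * entirely within a single 4096-byte window.
 */
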
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	/* chain the free blocks: each one stores the offset of the next */
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

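/*
 * Worked example (hypothetical numbers): with pool->size = 48,
 * pool->boundary = 1024 and pool->allocation = 4096, the chain built above
 * runs 0 -> 48 -> ... -> 960 -> 1024 -> 1072 -> ...; offset 1008 is skipped
 * because a 48-byte block there would cross the 1024-byte boundary.  A
 * stored offset that reaches pool->allocation terminates the free list.
 */
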
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
				__func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!want_init_on_alloc(mem_flags))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

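/*
 * Illustrative sketch (hypothetical caller): allocation may happen in atomic
 * context provided mem_flags allows it, and dma_pool_zalloc() can be used
 * when the block must start out zeroed:
 *
 *	void *cmd;
 *	dma_addr_t cmd_dma;
 *
 *	cmd = dma_pool_zalloc(pool, GFP_ATOMIC, &cmd_dma);
 *	if (!cmd)
 *		return -ENOMEM;
 *	...
 *	dma_pool_free(pool, cmd, cmd_dma);
 */
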
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			dev_err(pool->dev, "%s %s, dma %pad already free\n",
				__func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

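/*
 * Illustrative sketch (hypothetical structure): since dma_pool_free() needs
 * both the CPU address and the dma address of the block, callers usually
 * keep the pair together for the lifetime of the allocation:
 *
 *	struct example_buf {
 *		void		*cpu;
 *		dma_addr_t	dma;
 *	};
 *
 *	buf->cpu = dma_pool_alloc(pool, GFP_KERNEL, &buf->dma);
 *	...
 *	dma_pool_free(pool, buf->cpu, buf->dma);
 */
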
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

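/*
 * Illustrative sketch (hypothetical probe routine): a pool created with
 * dmam_pool_create() is torn down automatically on driver detach, so the
 * probe path needs no matching dma_pool_destroy():
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("bufs", &pdev->dev, 256, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *
 *		... allocate from 'pool' as usual during the device lifetime ...
 *
 *		return 0;
 *	}
 */
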
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);