// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
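
/*
 * Illustrative layout, assuming size = 64 and allocation = PAGE_SIZE (4096):
 * each free block stores the offset of the next free block in its first
 * bytes, and page->offset points at the head of the chain.
 *
 *	offset    0: next =   64   <- page->offset starts here
 *	offset   64: next =  128
 *	offset  128: next =  192
 *	...
 *	offset 3968: next = 4096   <- >= allocation terminates the chain
 *
 * dma_pool_alloc() pops the head block; dma_pool_free() pushes a block back
 * by writing the old head offset into it.
 */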

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int size;
	struct dma_page *page;
	struct dma_pool *pool;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4u %4zu %4zu %2u\n",
				      pool->name, blocks,
				      pages * (pool->allocation / pool->size),
				      pool->size, pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

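/*
 * Example of the resulting "pools" sysfs attribute (values illustrative):
 * columns are pool name, blocks in use, total blocks, block size, pages.
 *
 *	poolinfo - 0.1
 *	buffer-2048         7   16 2048  8
 *	buffer-512          1    8  512  1
 */
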
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
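
/*
 * Usage sketch with made-up values ("mydev" stands for the driver's
 * struct device): a pool of 64-byte blocks, 32-byte aligned, where no
 * block may cross a 4KB DMA boundary:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydesc", mydev, 64, 32, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */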

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
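
/*
 * Worked example, assuming size = 192, allocation = 4096, boundary = 1024.
 * The block at offset 768 ends at byte 959, but a block at 960 would cross
 * the 1024 boundary, so the chain skips ahead to the boundary itself:
 *
 *	0 -> 192 -> 384 -> 576 -> 768 -> 1024 -> 1216 -> ...
 *
 * Relative bytes 960..1023 of each 1024-byte chunk stay unused rather than
 * handing out a block that would straddle the boundary.
 */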
212 | ||
e87aa773 | 213 | static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) |
1da177e4 | 214 | { |
e87aa773 | 215 | struct dma_page *page; |
1da177e4 | 216 | |
a35a3455 | 217 | page = kmalloc(sizeof(*page), mem_flags); |
1da177e4 LT |
218 | if (!page) |
219 | return NULL; | |
a35a3455 | 220 | page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, |
e87aa773 | 221 | &page->dma, mem_flags); |
1da177e4 | 222 | if (page->vaddr) { |
b5ee5bef | 223 | #ifdef DMAPOOL_DEBUG |
e87aa773 | 224 | memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
1da177e4 | 225 | #endif |
a35a3455 | 226 | pool_initialise_page(pool, page); |
1da177e4 | 227 | page->in_use = 0; |
a35a3455 | 228 | page->offset = 0; |
1da177e4 | 229 | } else { |
e87aa773 | 230 | kfree(page); |
1da177e4 LT |
231 | page = NULL; |
232 | } | |
233 | return page; | |
234 | } | |
235 | ||
d9e7e37b | 236 | static inline bool is_page_busy(struct dma_page *page) |
1da177e4 | 237 | { |
a35a3455 | 238 | return page->in_use != 0; |
1da177e4 LT |
239 | } |
240 | ||
e87aa773 | 241 | static void pool_free_page(struct dma_pool *pool, struct dma_page *page) |
1da177e4 | 242 | { |
e87aa773 | 243 | dma_addr_t dma = page->dma; |
1da177e4 | 244 | |
b5ee5bef | 245 | #ifdef DMAPOOL_DEBUG |
e87aa773 | 246 | memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
1da177e4 | 247 | #endif |
e87aa773 MW |
248 | dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); |
249 | list_del(&page->page_list); | |
250 | kfree(page); | |
1da177e4 LT |
251 | } |
252 | ||
1da177e4 LT |
253 | /** |
254 | * dma_pool_destroy - destroys a pool of dma memory blocks. | |
255 | * @pool: dma pool that will be destroyed | |
256 | * Context: !in_interrupt() | |
257 | * | |
258 | * Caller guarantees that no more memory from the pool is in use, | |
259 | * and that nothing will try to use the pool after this call. | |
260 | */ | |
e87aa773 | 261 | void dma_pool_destroy(struct dma_pool *pool) |
1da177e4 | 262 | { |
42286f83 | 263 | struct dma_page *page, *tmp; |
01c2965f SAS |
264 | bool empty = false; |
265 | ||
44d7175d SS |
266 | if (unlikely(!pool)) |
267 | return; | |
268 | ||
01c2965f | 269 | mutex_lock(&pools_reg_lock); |
b2366d68 | 270 | mutex_lock(&pools_lock); |
e87aa773 | 271 | list_del(&pool->pools); |
67a540c6 | 272 | if (list_empty(&pool->dev->dma_pools)) |
01c2965f | 273 | empty = true; |
b2366d68 | 274 | mutex_unlock(&pools_lock); |
01c2965f SAS |
275 | if (empty) |
276 | device_remove_file(pool->dev, &dev_attr_pools); | |
277 | mutex_unlock(&pools_reg_lock); | |
1da177e4 | 278 | |
42286f83 | 279 | list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) { |
a35a3455 | 280 | if (is_page_busy(page)) { |
67a540c6 TB |
281 | dev_err(pool->dev, "%s %s, %p busy\n", __func__, |
282 | pool->name, page->vaddr); | |
1da177e4 | 283 | /* leak the still-in-use consistent memory */ |
e87aa773 MW |
284 | list_del(&page->page_list); |
285 | kfree(page); | |
1da177e4 | 286 | } else |
e87aa773 | 287 | pool_free_page(pool, page); |
1da177e4 LT |
288 | } |
289 | ||
e87aa773 | 290 | kfree(pool); |
1da177e4 | 291 | } |
e87aa773 | 292 | EXPORT_SYMBOL(dma_pool_destroy); |
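
/*
 * Illustrative teardown order: all blocks must have been returned with
 * dma_pool_free() first; any page still holding live blocks is reported
 * via dev_err() and leaked rather than handed back to the DMA API:
 *
 *	dma_pool_free(pool, buf, handle);
 *	dma_pool_destroy(pool);
 */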

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
				__func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
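
/*
 * Allocation sketch (illustrative names): the returned CPU pointer is for
 * the driver; the dma_addr_t written through @handle is what the device
 * sees, e.g. when programmed into a descriptor:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */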

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			dev_err(pool->dev, "%s %s, dma %pad already free\n",
				__func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
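
/*
 * Free sketch (illustrative): pass back both addresses from the matching
 * dma_pool_alloc() call; pool_find_page() relies on the DMA address to
 * locate the backing page:
 *
 *	dma_pool_free(pool, buf, handle);
 */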

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
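
/*
 * Managed-variant sketch (illustrative, from a probe() routine): the pool
 * is released by devres on driver detach, so no explicit destroy call is
 * needed in the remove path:
 *
 *	pool = dmam_pool_create("mydesc", &pdev->dev, 64, 32, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */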

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);