dmapool: consolidate page initialization
mm/dmapool.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
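
/*
 * Illustrative sketch (not part of this file): the typical life cycle a
 * driver gives one of these pools.  The device pointer, pool name and
 * block size below are hypothetical.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("qtds", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	... hand 'dma' to the device, use 'cpu_addr' from the CPU ...
 *	dma_pool_free(pool, cpu_addr, dma);
 *	dma_pool_destroy(pool);
 */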

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int size;
	struct dma_page *page;
	struct dma_pool *pool;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		size_t blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
				      pool->name, blocks,
				      (size_t) pages *
				      (pool->allocation / pool->size),
				      pool->size, pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, void *retval,
			     unsigned int offset, gfp_t mem_flags)
{
	int i;
	u8 *data = retval;
	/* page->offset is stored in first 4 bytes */
	for (i = sizeof(offset); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n",
			__func__, pool->name, retval);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
			       data, pool->size, 1);
		break;
	}
	if (!want_init_on_alloc(mem_flags))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
}

static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
			  void *vaddr, dma_addr_t dma)
{
	unsigned int offset = vaddr - page->vaddr;
	unsigned int chain = page->offset;

	if ((dma - page->dma) != offset) {
		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (chain < pool->allocation) {
		if (chain != offset) {
			chain = *(int *)(page->vaddr + chain);
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, void *retval,
			     unsigned int offset, gfp_t mem_flags)
{
}

static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
			  void *vaddr, dma_addr_t dma)
{
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

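/*
 * Illustrative sketch (hypothetical sizes): with size = 64, align = 64
 * and boundary = 4096, every block handed out is 64-byte aligned and no
 * block straddles a 4 KiB boundary:
 *
 *	pool = dma_pool_create("ep-desc", dev, 64, 64, 4096);
 *
 * Passing a zero boundary relaxes this to "blocks fit inside one
 * 'allocation'-sized chunk", since zero is rewritten to the allocation
 * size above.
 */
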
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	pool_init_page(pool, page);
	page->in_use = 0;
	page->offset = 0;
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

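/*
 * Worked example (assumed numbers, for illustration only): with
 * pool->size = 48, pool->boundary = 64 and pool->allocation = 4096,
 * the loop above builds the free-list chain
 *
 *	0 -> 64 -> 128 -> ... -> 4032 -> 4096
 *
 * Each free block's first 4 bytes hold the offset of the next free
 * block; 4096 equals pool->allocation, which dma_pool_alloc() treats
 * as "no free block left in this page".  Every 48-byte block starts
 * its own 64-byte stride, so none crosses a boundary.
 */
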
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	pool_initialise_page(pool, page);
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

	pool_init_page(pool, page);
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
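
/*
 * Illustrative sketch (hypothetical teardown path): every block must be
 * returned with dma_pool_free() before the pool is destroyed, or the
 * loop above logs a dev_err() and leaks the still-busy page:
 *
 *	for (i = 0; i < n_bufs; i++)
 *		dma_pool_free(pool, buf[i].cpu_addr, buf[i].dma);
 *	dma_pool_destroy(pool);
 */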

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	unsigned int offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
	pool_check_block(pool, retval, offset, mem_flags);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
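
/*
 * Illustrative sketch (hypothetical caller): @mem_flags decides the
 * blocking behaviour, so GFP_ATOMIC is usable from interrupt context
 * while GFP_KERNEL may sleep in pool_alloc_page().  Callers wanting
 * zeroed memory can pass __GFP_ZERO (the dma_pool_zalloc() wrapper
 * does exactly that).
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	... point the device at 'dma', fill 'buf' from the CPU ...
 */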

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}

	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	if (pool_page_err(pool, page, vaddr, dma)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return;
	}

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = vaddr - page->vaddr;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
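
/*
 * Illustrative sketch: the caller passes back both addresses it got
 * from dma_pool_alloc(); the pool looks the page up by 'dma' and uses
 * 'vaddr' to relink the block into the page's free list.
 *
 *	dma_pool_free(pool, buf, dma);
 */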

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
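
/*
 * Illustrative sketch (hypothetical probe routine): a pool from
 * dmam_pool_create() needs no explicit dma_pool_destroy() on error or
 * detach paths; devres destroys it when the driver unbinds.  'priv'
 * and 'struct foo_desc' are assumed names.
 *
 *	priv->pool = dmam_pool_create("foo-desc", &pdev->dev,
 *				      sizeof(struct foo_desc), 8, 0);
 *	if (!priv->pool)
 *		return -ENOMEM;
 */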

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);