Commit | Line | Data |
---|---|---|
8607a965 | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
c64be2bb MS |
2 | #ifndef __LINUX_CMA_H |
3 | #define __LINUX_CMA_H | |
4 | ||
5 | /* | |
6 | * Contiguous Memory Allocator for DMA mapping framework | |
7 | * Copyright (c) 2010-2011 by Samsung Electronics. | |
8 | * Written by: | |
9 | * Marek Szyprowski <m.szyprowski@samsung.com> | |
10 | * Michal Nazarewicz <mina86@mina86.com> | |
c64be2bb MS |
11 | */ |
12 | ||
13 | /* | |
14 | * Contiguous Memory Allocator | |
15 | * | |
16 | * The Contiguous Memory Allocator (CMA) makes it possible to | |
17 | * allocate big contiguous chunks of memory after the system has | |
18 | * booted. | |
19 | * | |
20 | * Why is it needed? | |
21 | * | |
22 | * Various devices on embedded systems have no scatter-gather and/or | |
23 | * IO map support and require contiguous blocks of memory to | |
24 | * operate. They include devices such as cameras, hardware video | |
25 | * coders, etc. | |
26 | * | |
27 | * Such devices often require big memory buffers (a full HD frame | |
28 | * is, for instance, more than 2 mega pixels large, i.e. more than 6 | |
29 | * MB of memory), which makes mechanisms such as kmalloc() or | |
30 | * alloc_page() ineffective. | |
31 | * | |
32 | * At the same time, a solution where a big memory region is | |
33 | * reserved for a device is suboptimal since often more memory is | |
34 | * reserved than strictly required and, moreover, the memory is | |
35 | * inaccessible to page system even if device drivers don't use it. | |
36 | * | |
37 | * CMA tries to solve this issue by operating on memory regions | |
38 | * where only movable pages can be allocated from. This way, kernel | |
39 | * can use the memory for pagecache and when device driver requests | |
40 | * it, allocated pages can be migrated. | |
41 | * | |
42 | * Driver usage | |
43 | * | |
44 | * CMA should not be used by the device drivers directly. It is | |
45 | * only a helper framework for dma-mapping subsystem. | |
46 | * | |
cf65a0f6 | 47 | * For more information, see kernel-docs in kernel/dma/contiguous.c |
c64be2bb MS |
48 | */ |
49 | ||
50 | #ifdef __KERNEL__ | |
51 | ||
a254129e JK |
52 | #include <linux/device.h> |
53 | ||
c64be2bb MS |
54 | struct cma; |
55 | struct page; | |
c64be2bb | 56 | |
f825c736 | 57 | #ifdef CONFIG_DMA_CMA |
c64be2bb | 58 | |
c64be2bb MS |
59 | extern struct cma *dma_contiguous_default_area; |
60 | ||
a2547380 MS |
61 | static inline struct cma *dev_get_cma_area(struct device *dev) |
62 | { | |
63 | if (dev && dev->cma_area) | |
64 | return dev->cma_area; | |
65 | return dma_contiguous_default_area; | |
66 | } | |
67 | ||
68 | static inline void dev_set_cma_area(struct device *dev, struct cma *cma) | |
69 | { | |
70 | if (dev) | |
71 | dev->cma_area = cma; | |
72 | } | |
73 | ||
/*
 * dma_contiguous_set_default() - install @cma as the system-wide default
 * CMA area, used by dev_get_cma_area() for devices without their own area.
 */
static inline void dma_contiguous_set_default(struct cma *cma)
{
	dma_contiguous_default_area = cma;
}
78 | ||
c64be2bb | 79 | void dma_contiguous_reserve(phys_addr_t addr_limit); |
a2547380 MS |
80 | |
81 | int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, | |
5ea3b1b2 AM |
82 | phys_addr_t limit, struct cma **res_cma, |
83 | bool fixed); | |
a2547380 MS |
84 | |
85 | /** | |
86 | * dma_declare_contiguous() - reserve area for contiguous memory handling | |
87 | * for particular device | |
88 | * @dev: Pointer to device structure. | |
89 | * @size: Size of the reserved memory. | |
90 | * @base: Start address of the reserved memory (optional, 0 for any). | |
91 | * @limit: End address of the reserved memory (optional, 0 for any). | |
92 | * | |
93 | * This function reserves memory for specified device. It should be | |
94 | * called by board specific code when early allocator (memblock or bootmem) | |
95 | * is still activate. | |
96 | */ | |
97 | ||
98 | static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |
99 | phys_addr_t base, phys_addr_t limit) | |
100 | { | |
101 | struct cma *cma; | |
102 | int ret; | |
5ea3b1b2 | 103 | ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); |
a2547380 MS |
104 | if (ret == 0) |
105 | dev_set_cma_area(dev, cma); | |
106 | ||
107 | return ret; | |
108 | } | |
c64be2bb | 109 | |
67a2e213 | 110 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
d834c5ab | 111 | unsigned int order, bool no_warn); |
c64be2bb MS |
112 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, |
113 | int count); | |
114 | ||
115 | #else | |
116 | ||
a2547380 MS |
/* CONFIG_DMA_CMA disabled: no device ever has a CMA area. */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
121 | ||
/* CONFIG_DMA_CMA disabled: nothing to assign. */
static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
123 | ||
/* CONFIG_DMA_CMA disabled: there is no default area to install. */
static inline void dma_contiguous_set_default(struct cma *cma) { }
125 | ||
c64be2bb MS |
/* CONFIG_DMA_CMA disabled: no early boot reservation is performed. */
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
127 | ||
a2547380 | 128 | static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, |
5ea3b1b2 AM |
129 | phys_addr_t limit, struct cma **res_cma, |
130 | bool fixed) | |
131 | { | |
a2547380 MS |
132 | return -ENOSYS; |
133 | } | |
134 | ||
c64be2bb | 135 | static inline |
4009793e | 136 | int dma_declare_contiguous(struct device *dev, phys_addr_t size, |
c64be2bb MS |
137 | phys_addr_t base, phys_addr_t limit) |
138 | { | |
139 | return -ENOSYS; | |
140 | } | |
141 | ||
/* CONFIG_DMA_CMA disabled: contiguous allocation always fails. */
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn)
{
	return NULL;
}
148 | ||
/* CONFIG_DMA_CMA disabled: nothing was allocated, so nothing to release. */
static inline
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return false;
}
155 | ||
156 | #endif | |
157 | ||
158 | #endif | |
159 | ||
160 | #endif |