// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
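
/*
 * Note (not part of this file): the function above is the standard devres
 * idiom. A tracking record is allocated with devres_alloc(), the mapping is
 * performed, and on success the record is registered with devres_add() so
 * that the release callback (devm_ioremap_release()) runs automatically on
 * driver detach; on failure the record is discarded with devres_free().
 */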
1b723413 YX |
55 | |
56 | /** | |
57 | * devm_ioremap - Managed ioremap() | |
58 | * @dev: Generic device to remap IO address for | |
59 | * @offset: Resource address to map | |
60 | * @size: Size of map | |
61 | * | |
62 | * Managed ioremap(). Map is automatically unmapped on driver detach. | |
63 | */ | |
64 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, | |
65 | resource_size_t size) | |
66 | { | |
67 | return __devm_ioremap(dev, offset, size, DEVM_IOREMAP); | |
68 | } | |
5ea81769 AV |
69 | EXPORT_SYMBOL(devm_ioremap); |
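
/*
 * Illustrative sketch (not part of this file): a minimal platform-driver
 * probe using devm_ioremap(). The foo_* names and the register offset are
 * hypothetical; real drivers usually prefer devm_ioremap_resource(), which
 * also requests the memory region.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *		base = devm_ioremap(&pdev->dev, res->start,
 *				    resource_size(res));
 *		if (!base)
 *			return -ENOMEM;
 *		writel(0x1, base + 0x04);	// hypothetical enable reg
 *		return 0;	// mapping is torn down on driver detach
 *	}
 */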

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);
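
/*
 * Illustrative sketch (not part of this file): write-combined mappings suit
 * large write-mostly regions such as framebuffers. The names below are
 * hypothetical.
 *
 *	info->screen_base = devm_ioremap_wc(&pdev->dev, res->start,
 *					    resource_size(res));
 *	if (!info->screen_base)
 *		return -ENOMEM;
 */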

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
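
/*
 * Illustrative sketch (not part of this file): devm_iounmap() is only needed
 * when a mapping must go away before driver detach, e.g. a register window
 * used only during probe. Variable names are hypothetical.
 *
 *	base = devm_ioremap(dev, res->start, resource_size(res));
 *	...
 *	devm_iounmap(dev, base);	// drop the mapping ahead of detach
 */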

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);
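
/*
 * Note (not part of this file): for the common platform-device case the
 * two-step example above is wrapped by devm_platform_ioremap_resource(),
 * which looks up the n-th IORESOURCE_MEM entry and calls
 * devm_ioremap_resource() on it:
 *
 *	base = devm_platform_ioremap_resource(pdev, 0);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */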

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/*
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev:	The device "managing" the resource
 * @node:	The device-tree node where the resource resides
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please Note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped. If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);
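
/*
 * Illustrative sketch (not part of this file): mapping a register range
 * described by a child node of the device's own node. The "syscon"
 * child-node name is hypothetical.
 *
 *	struct device_node *child;
 *	void __iomem *base;
 *
 *	child = of_get_child_by_name(dev->of_node, "syscon");
 *	if (!child)
 *		return -ENODEV;
 *	base = devm_of_iomap(dev, child, 0, NULL);
 *	of_node_put(child);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */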

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
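
/*
 * Illustrative sketch (not part of this file): mapping a legacy I/O port
 * range so it can be accessed with ioread*()/iowrite*(). The port base,
 * count, and register offset are hypothetical.
 *
 *	void __iomem *port_base;
 *
 *	port_base = devm_ioport_map(dev, 0x3f8, 8);
 *	if (!port_base)
 *		return -ENOMEM;
 *	iowrite8(0x80, port_base + 3);	// hypothetical register write
 */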

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
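
/*
 * Illustrative sketch (not part of this file): once pcim_iomap_regions()
 * has populated the table, a driver typically fetches a BAR mapping by
 * indexing it. The BAR number is hypothetical.
 *
 *	void __iomem *mmio = pcim_iomap_table(pdev)[0];
 */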

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
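
/*
 * Illustrative sketch (not part of this file): mapping a single BAR of a
 * managed PCI device. The BAR number is hypothetical; a @maxlen of 0 maps
 * the whole BAR.
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	mmio = pcim_iomap(pdev, 0, 0);
 *	if (!mmio)
 *		return -ENOMEM;
 */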

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
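
/*
 * Illustrative sketch (not part of this file): requesting and mapping BARs
 * 0 and 2 in one call. @mask is a bitmask of BAR numbers; the driver name
 * string is hypothetical.
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "foo_driver");
 *	if (rc)
 *		return rc;
 *	mmio = pcim_iomap_table(pdev)[0];
 */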

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
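
/*
 * Illustrative note (not part of this file): this variant is useful when a
 * driver only accesses some BARs through MMIO but still wants to claim all
 * regions so no other driver can grab them, e.g.:
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(0), "foo_driver");
 */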

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */