// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

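	/*
	 * The devres node stores only the cookie that devm_ioremap_release()
	 * will hand back to iounmap() on driver detach; allocate it on
	 * @dev's NUMA node so the bookkeeping stays local to the device.
	 */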
	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
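 *
 * A minimal usage sketch (the platform device @pdev and the MMIO
 * resource @res are hypothetical, assumed to come from the caller's
 * probe path):
 *
 *	void __iomem *base;
 *
 *	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;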
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

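	/*
	 * Resources flagged as non-posted MMIO (e.g. marked that way in the
	 * devicetree) must be mapped with ioremap_np() semantics.
	 */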
	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
		type = DEVM_IOREMAP_NP;

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name) {
		dev_err(dev, "can't generate pretty name for resource %pR\n", res);
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/**
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 * @dev: The device "managing" the resource
 * @node: The device-tree node where the resource resides
 * @index: Index of the MMIO range in the "reg" property
 * @size: Returns the size of the resource (pass NULL if not needed)
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please note: this is not a one-to-one replacement for of_iomap(), because
 * of_iomap() does not track whether the region is already mapped. If two
 * drivers try to map the same memory, of_iomap() will succeed but
 * devm_of_iomap() will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 *
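 * A minimal usage sketch (the device @pdev and the legacy I/O port base
 * @port are hypothetical, assumed to be owned by the calling driver):
 *
 *	void __iomem *pio;
 *
 *	pio = devm_ioport_map(&pdev->dev, port, 8);
 *	if (!pio)
 *		return -ENOMEM;
 *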
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the table doesn't
 * exist yet and @pdev is managed, it will be allocated. All iomaps
 * recorded in the table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated, but
 * once the table exists it can be called from any context and is
 * guaranteed to succeed.
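 *
 * A minimal usage sketch (assuming this hypothetical driver previously
 * mapped BAR 0 with pcim_iomap() or pcim_iomap_regions()):
 *
 *	void __iomem * const *tbl = pcim_iomap_table(pdev);
 *
 *	if (!tbl || !tbl[0])
 *		return -ENOMEM;
 *	base = tbl[0];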
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
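 *
 * A minimal usage sketch (the choice of BAR 0 and the surrounding probe
 * context are hypothetical; a @maxlen of 0 maps the whole BAR):
 *
 *	void __iomem *base;
 *
 *	base = pcim_iomap(pdev, 0, 0);
 *	if (!base)
 *		return -ENOMEM;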
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
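 *
 * A minimal usage sketch (the driver name "mydrv" and the choice of
 * BARs 0 and 2 are hypothetical):
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "mydrv");
 *	if (rc)
 *		return rc;
 *	base = pcim_iomap_table(pdev)[0];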
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
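 *
 * A minimal usage sketch (again with a hypothetical driver name; only
 * BAR 0 is mapped, but every standard BAR is requested):
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(0), "mydrv");
 *	if (rc)
 *		return rc;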
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << PCI_STD_NUM_BARS) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */

static void devm_arch_phys_wc_add_release(struct device *dev, void *res)
{
	arch_phys_wc_del(*((int *)res));
}

/**
 * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
 * @dev: Managed device
 * @base: Memory base address
 * @size: Size of memory range
 *
 * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
 * See arch_phys_wc_add() for more information.
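 *
 * A minimal usage sketch (the framebuffer resource @res is hypothetical;
 * as with arch_phys_wc_add(), failure is typically non-fatal):
 *
 *	int mtrr;
 *
 *	mtrr = devm_arch_phys_wc_add(&pdev->dev, res->start, resource_size(res));
 *	if (mtrr < 0)
 *		dev_dbg(&pdev->dev, "no WC MTRR for %pR\n", res);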
 */
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
	int *mtrr;
	int ret;

	mtrr = devres_alloc_node(devm_arch_phys_wc_add_release, sizeof(*mtrr),
				 GFP_KERNEL, dev_to_node(dev));
	if (!mtrr)
		return -ENOMEM;

	ret = arch_phys_wc_add(base, size);
	if (ret < 0) {
		devres_free(mtrr);
		return ret;
	}

	*mtrr = ret;
	devres_add(dev, mtrr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);

struct arch_io_reserve_memtype_wc_devres {
	resource_size_t start;
	resource_size_t size;
};

static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
	const struct arch_io_reserve_memtype_wc_devres *this = res;

	arch_io_free_memtype_wc(this->start, this->size);
}

/**
 * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
 * @dev: Managed device
 * @start: Memory base address
 * @size: Size of memory range
 *
 * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
 * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
 * information.
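 *
 * A minimal usage sketch (the VRAM aperture @res is hypothetical):
 *
 *	ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, res->start,
 *					      resource_size(res));
 *	if (ret)
 *		return ret;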
 */
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
				    resource_size_t size)
{
	struct arch_io_reserve_memtype_wc_devres *dr;
	int ret;

	dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr),
			       GFP_KERNEL, dev_to_node(dev));
	if (!dr)
		return -ENOMEM;

	ret = arch_io_reserve_memtype_wc(start, size);
	if (ret < 0) {
		devres_free(dr);
		return ret;
	}

	dr->start = start;
	dr->size = size;
	devres_add(dev, dr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);