// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

/*
 * Return the next node of @p in pre-order tree traversal. If
 * @skip_children is true, skip the descendant nodes of @p in
 * traversal. If @p is a descendant of @subtree_root, only traverse
 * the subtree under @subtree_root.
 */
static struct resource *next_resource(struct resource *p, bool skip_children,
				      struct resource *subtree_root)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent) {
		p = p->parent;
		if (p == subtree_root)
			return NULL;
	}
	return p->sibling;
}

/*
 * Traverse the resource subtree under @_root in pre-order, excluding
 * @_root itself.
 *
 * NOTE: '__p' is introduced to avoid shadowing '_p' outside of the loop,
 * and it is referenced in the loop condition to avoid an unused-variable
 * warning.
 */
#define for_each_resource(_root, _p, _skip_children)			\
	for (typeof(_root) __root = (_root), __p = _p = __root->child;	\
	     __p && _p; _p = next_resource(_p, _skip_children, __root))
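
/*
 * Example: counting every node in the iomem tree (a minimal sketch;
 * assumes the caller already holds resource_lock for reading):
 *
 *	struct resource *p;
 *	unsigned int nr = 0;
 *
 *	for_each_resource(&iomem_resource, p, false)
 *		nr++;
 */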

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false, NULL);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
		   depth * 2, "",
		   width, start,
		   width, end,
		   r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			     &ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}
/* Return the conflicting entry if the resource can't be requested, NULL on success */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL(request_resource);
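
/*
 * Example: a driver claiming a fixed MMIO window (a minimal sketch; the
 * address, size, and name are made up for illustration):
 *
 *	static struct resource foo_res =
 *		DEFINE_RES_MEM_NAMED(0xfe000000, SZ_4K, "foo-regs");
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;	// range already claimed by someone else
 */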

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 *
 * Returns 0 on success, -EINVAL if @old was not reserved in the tree.
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL(release_resource);

static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
{
	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
}

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		/* Found a match, break */
		if (is_type_match(p, flags, desc))
			break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All memory ranges that overlap [@start..@end] and also match @flags and
 * @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
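
/*
 * Example: counting ACPI NVS ranges in the whole physical address space
 * (a minimal sketch; the count_range() callback is made up):
 *
 *	static int count_range(struct resource *res, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;	// returning non-zero stops the walk
 *	}
 *
 *	unsigned int nr = 0;
 *	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, IORESOURCE_MEM,
 *			    0, -1, &nr, count_range);
 */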

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * This function is only for System RAM; it deals with full ranges and not
 * PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY, in reverse order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
			    int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i++] = res;
		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
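
/*
 * Example: summing the System RAM pages inside a caller-supplied PFN
 * window (a minimal sketch; the add_pages() callback is made up):
 *
 *	static int add_pages(unsigned long pfn, unsigned long nr, void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &total, add_pages);
 */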

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	int type = 0, other = 0;
	struct resource *p, *dp;
	struct resource res, o;
	bool covered;

	res = DEFINE_RES(start, size, 0);

	for (p = parent->child; p; p = p->sibling) {
		if (!resource_intersection(p, &res, &o))
			continue;
		if (is_type_match(p, flags, desc)) {
			type++;
			continue;
		}
		/*
		 * Continue to search in descendant resources, because the
		 * matched descendant resources may cover some ranges of 'p'.
		 *
		 * |------------- "CXL Window 0" ------------|
		 * |-- "System RAM" --|
		 *
		 * will behave similarly to the following fake resource
		 * tree when searching for "System RAM".
		 *
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			if (is_type_match(dp, flags, desc)) {
				type++;
				/*
				 * Range from 'o.start' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > o.start)
					break;
				if (dp->end >= o.end) {
					covered = true;
					break;
				}
				/* Remove covered range */
				o.start = max(o.start, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
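
/*
 * Example: refusing to remap a range that touches System RAM, in the
 * spirit of the ioremap() sanity checks (a minimal sketch):
 *
 *	if (region_intersects(phys_addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return NULL;	// part of the range is RAM, do not remap
 */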

void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp.end below would cause an
	 * underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root:	Root resource descriptor
 * @new:	Resource descriptor awaiting an empty resource space
 * @size:	The minimum size of the empty space
 * @constraint:	The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying range and
 * alignment @constraints.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);
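
/*
 * Example: probing for a 1 MiB aligned hole below 4 GiB (a minimal
 * sketch; a real caller must hold resource_lock for writing and claim
 * the hole before dropping the lock, as allocate_resource() does):
 *
 *	struct resource_constraint constraint = {
 *		.min = 0,
 *		.max = 0xffffffff,
 *		.align = SZ_1M,
 *	};
 *	struct resource new = { .flags = IORESOURCE_MEM };
 *
 *	if (!find_resource_space(&iomem_resource, &new, SZ_1M, &constraint)) {
 *		// new.start and new.end now describe a suitable hole
 *	}
 */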

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be
 *	accommodated at the current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the memory range and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource_space(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was
 *	already allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 *
 * Returns 0 on success, a negative error code on error.
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * Resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}
EXPORT_SYMBOL(allocate_resource);
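
/*
 * Example: carving a 64 KiB, 64 KiB-aligned window out of a bus root
 * resource (a minimal sketch; bus_root and the limits are assumptions
 * of this illustration):
 *
 *	struct resource *win = kzalloc(sizeof(*win), GFP_KERNEL);
 *
 *	win->name = "dma-window";
 *	win->flags = IORESOURCE_MEM;
 *	if (allocate_resource(bus_root, win, SZ_64K, 0, 0xffffffff,
 *			      SZ_64K, NULL, NULL))
 *		return -EBUSY;	// no suitable hole under bus_root
 */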

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the new resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
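
/*
 * Example: how a firmware driver might publish a window that may already
 * contain previously requested child ranges (a minimal sketch; the
 * address, size, and name are made up):
 *
 *	static struct resource fw_win =
 *		DEFINE_RES_MEM_NAMED(0x80000000, SZ_16M, "fw-window");
 *
 *	// Unlike request_resource(), this succeeds even if existing
 *	// resources fall inside the range; they become children of fw_win.
 *	if (insert_resource(&iomem_resource, &fw_win))
 *		return -EBUSY;
 */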

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
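
/*
 * Example: growing a claimed region in place once a larger aperture is
 * discovered (a minimal sketch; res is assumed to be a resource that
 * was requested earlier):
 *
 *	// same start, double the size; fails with -EBUSY if the new range
 *	// would cross the parent's boundary or collide with a sibling
 *	err = adjust_resource(res, res->start, 2 * resource_size(res));
 */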

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, before iomem_get_mapping() users
	 * can establish any mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);
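
/*
 * Example: drivers normally reach this through the wrappers in
 * <linux/ioport.h> (a minimal sketch; the address, size, and name are
 * made up):
 *
 *	if (!request_mem_region(0xfe000000, SZ_4K, "foo-regs"))
 *		return -EBUSY;
 *
 *	// ... use the hardware ...
 *
 *	release_mem_region(0xfe000000, SZ_4K);
 */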

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
	 * just before releasing the region. This is highly unlikely to
	 * fail - let's play safe and make it never fail as the caller cannot
	 * perform any error handling (e.g., trying to re-add memory will fail
	 * similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/*
		 * Look for the next resource if the range does not fit
		 * into this one.
		 */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (in contrast to e.g.
 * DIMMs, where the boundaries might be relevant). Only resources that are
 * marked mergeable, that have the same parent, and that don't have any
 * children are considered. All mergeable resources must be immutable during
 * the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
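
/*
 * Example: typical use from a driver's probe() routine (a minimal
 * sketch; the device and resource are made up):
 *
 *	static struct resource foo_res =
 *		DEFINE_RES_MEM_NAMED(0xfe000000, SZ_4K, "foo-regs");
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// released automatically when the device is unbound
 *		return devm_request_resource(&pdev->dev, &iomem_resource,
 *					     &foo_res);
 *	}
 */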
| 1611 | |
| 1612 | static int devm_resource_match(struct device *dev, void *res, void *data) |
| 1613 | { |
| 1614 | struct resource **ptr = res; |
| 1615 | |
| 1616 | return *ptr == data; |
| 1617 | } |
| 1618 | |
| 1619 | /** |
| 1620 | * devm_release_resource() - release a previously requested resource |
| 1621 | * @dev: device for which to release the resource |
| 1622 | * @new: descriptor of the resource to release |
| 1623 | * |
| 1624 | * Releases a resource previously requested using devm_request_resource(). |
| 1625 | */ |
| 1626 | void devm_release_resource(struct device *dev, struct resource *new) |
| 1627 | { |
| 1628 | WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match, |
| 1629 | new)); |
| 1630 | } |
| 1631 | EXPORT_SYMBOL(devm_release_resource); |
| 1632 | |
| 1633 | struct region_devres { |
| 1634 | struct resource *parent; |
| 1635 | resource_size_t start; |
| 1636 | resource_size_t n; |
| 1637 | }; |
| 1638 | |
| 1639 | static void devm_region_release(struct device *dev, void *res) |
| 1640 | { |
| 1641 | struct region_devres *this = res; |
| 1642 | |
| 1643 | __release_region(this->parent, this->start, this->n); |
| 1644 | } |
| 1645 | |
| 1646 | static int devm_region_match(struct device *dev, void *res, void *match_data) |
| 1647 | { |
| 1648 | struct region_devres *this = res, *match = match_data; |
| 1649 | |
| 1650 | return this->parent == match->parent && |
| 1651 | this->start == match->start && this->n == match->n; |
| 1652 | } |
| 1653 | |
| 1654 | struct resource * |
| 1655 | __devm_request_region(struct device *dev, struct resource *parent, |
| 1656 | resource_size_t start, resource_size_t n, const char *name) |
| 1657 | { |
| 1658 | struct region_devres *dr = NULL; |
| 1659 | struct resource *res; |
| 1660 | |
| 1661 | dr = devres_alloc(devm_region_release, sizeof(struct region_devres), |
| 1662 | GFP_KERNEL); |
| 1663 | if (!dr) |
| 1664 | return NULL; |
| 1665 | |
| 1666 | dr->parent = parent; |
| 1667 | dr->start = start; |
| 1668 | dr->n = n; |
| 1669 | |
| 1670 | res = __request_region(parent, start, n, name, 0); |
| 1671 | if (res) |
| 1672 | devres_add(dev, dr); |
| 1673 | else |
| 1674 | devres_free(dr); |
| 1675 | |
| 1676 | return res; |
| 1677 | } |
| 1678 | EXPORT_SYMBOL(__devm_request_region); |
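
/*
 * Callers normally use the devm_request_region() and
 * devm_request_mem_region() wrappers from <linux/ioport.h>, which pass
 * &ioport_resource and &iomem_resource respectively. A minimal sketch
 * with illustrative values:
 *
 *	if (!devm_request_mem_region(dev, res->start, resource_size(res),
 *				     "foo"))
 *		return -EBUSY;
 */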
| 1679 | |
| 1680 | void __devm_release_region(struct device *dev, struct resource *parent, |
| 1681 | resource_size_t start, resource_size_t n) |
| 1682 | { |
| 1683 | struct region_devres match_data = { parent, start, n }; |
| 1684 | |
| 1685 | WARN_ON(devres_release(dev, devm_region_release, devm_region_match, |
| 1686 | &match_data)); |
| 1687 | } |
| 1688 | EXPORT_SYMBOL(__devm_release_region); |
| 1689 | |
| 1690 | /* |
| 1691 | * Reserve I/O ports or memory based on "reserve=" kernel parameter. |
| 1692 | */ |
| 1693 | #define MAXRESERVE 4 |
| 1694 | static int __init reserve_setup(char *str) |
| 1695 | { |
| 1696 | static int reserved; |
| 1697 | static struct resource reserve[MAXRESERVE]; |
| 1698 | |
| 1699 | for (;;) { |
| 1700 | unsigned int io_start, io_num; |
| 1701 | int x = reserved; |
| 1702 | struct resource *parent; |
| 1703 | |
| 1704 | if (get_option(&str, &io_start) != 2) |
| 1705 | break; |
| 1706 | if (get_option(&str, &io_num) == 0) |
| 1707 | break; |
| 1708 | if (x < MAXRESERVE) { |
| 1709 | struct resource *res = reserve + x; |
| 1710 | |
| 1711 | /* |
| 1712 | * If the region starts below 0x10000, we assume it's |
| 1713 | * I/O port space; otherwise assume it's memory. |
| 1714 | */ |
| 1715 | if (io_start < 0x10000) { |
| 1716 | *res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved"); |
| 1717 | parent = &ioport_resource; |
| 1718 | } else { |
| 1719 | *res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved"); |
| 1720 | parent = &iomem_resource; |
| 1721 | } |
| 1722 | res->flags |= IORESOURCE_BUSY; |
| 1723 | if (request_resource(parent, res) == 0) |
| 1724 | reserved = x+1; |
| 1725 | } |
| 1726 | } |
| 1727 | return 1; |
| 1728 | } |
| 1729 | __setup("reserve=", reserve_setup); |
| 1730 | |
| 1731 | /* |
 * Check if the requested addr and size span more than any single slot in
 * the iomem resource tree.
| 1734 | */ |
| 1735 | int iomem_map_sanity_check(resource_size_t addr, unsigned long size) |
| 1736 | { |
| 1737 | resource_size_t end = addr + size - 1; |
| 1738 | struct resource *p; |
| 1739 | int err = 0; |
| 1740 | |
| 1741 | read_lock(&resource_lock); |
| 1742 | for_each_resource(&iomem_resource, p, false) { |
| 1743 | /* |
| 1744 | * We can probably skip the resources without |
| 1745 | * IORESOURCE_IO attribute? |
| 1746 | */ |
| 1747 | if (p->start > end) |
| 1748 | continue; |
| 1749 | if (p->end < addr) |
| 1750 | continue; |
| 1751 | if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && |
| 1752 | PFN_DOWN(p->end) >= PFN_DOWN(end)) |
| 1753 | continue; |
| 1754 | /* |
| 1755 | * if a resource is "BUSY", it's not a hardware resource |
| 1756 | * but a driver mapping of such a resource; we don't want |
| 1757 | * to warn for those; some drivers legitimately map only |
| 1758 | * partial hardware resources. (example: vesafb) |
| 1759 | */ |
| 1760 | if (p->flags & IORESOURCE_BUSY) |
| 1761 | continue; |
| 1762 | |
| 1763 | pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n", |
| 1764 | &addr, &end, p->name, p); |
| 1765 | err = -1; |
| 1766 | break; |
| 1767 | } |
| 1768 | read_unlock(&resource_lock); |
| 1769 | |
| 1770 | return err; |
| 1771 | } |
| 1772 | |
| 1773 | #ifdef CONFIG_STRICT_DEVMEM |
| 1774 | static int strict_iomem_checks = 1; |
| 1775 | #else |
| 1776 | static int strict_iomem_checks; |
| 1777 | #endif |
| 1778 | |
| 1779 | /* |
| 1780 | * Check if an address is exclusive to the kernel and must not be mapped to |
| 1781 | * user space, for example, via /dev/mem. |
| 1782 | * |
| 1783 | * Returns true if exclusive to the kernel, otherwise returns false. |
| 1784 | */ |
| 1785 | bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size) |
| 1786 | { |
| 1787 | const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM | |
| 1788 | IORESOURCE_EXCLUSIVE; |
| 1789 | bool skip_children = false, err = false; |
| 1790 | struct resource *p; |
| 1791 | |
| 1792 | read_lock(&resource_lock); |
| 1793 | for_each_resource(root, p, skip_children) { |
| 1794 | if (p->start >= addr + size) |
| 1795 | break; |
| 1796 | if (p->end < addr) { |
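			/*
			 * @p ends before the range starts; since any
			 * children are contained in @p, they cannot
			 * overlap the range either and can be skipped.
			 */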
| 1797 | skip_children = true; |
| 1798 | continue; |
| 1799 | } |
| 1800 | skip_children = false; |
| 1801 | |
| 1802 | /* |
| 1803 | * IORESOURCE_SYSTEM_RAM resources are exclusive if |
| 1804 | * IORESOURCE_EXCLUSIVE is set, even if they |
| 1805 | * are not busy and even if "iomem=relaxed" is set. The |
| 1806 | * responsible driver dynamically adds/removes system RAM within |
| 1807 | * such an area and uncontrolled access is dangerous. |
| 1808 | */ |
| 1809 | if ((p->flags & exclusive_system_ram) == exclusive_system_ram) { |
| 1810 | err = true; |
| 1811 | break; |
| 1812 | } |
| 1813 | |
| 1814 | /* |
| 1815 | * A resource is exclusive if IORESOURCE_EXCLUSIVE is set |
| 1816 | * or CONFIG_IO_STRICT_DEVMEM is enabled and the |
| 1817 | * resource is busy. |
| 1818 | */ |
| 1819 | if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY)) |
| 1820 | continue; |
| 1821 | if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) |
| 1822 | || p->flags & IORESOURCE_EXCLUSIVE) { |
| 1823 | err = true; |
| 1824 | break; |
| 1825 | } |
| 1826 | } |
| 1827 | read_unlock(&resource_lock); |
| 1828 | |
| 1829 | return err; |
| 1830 | } |
| 1831 | |
| 1832 | bool iomem_is_exclusive(u64 addr) |
| 1833 | { |
| 1834 | return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK, |
| 1835 | PAGE_SIZE); |
| 1836 | } |
| 1837 | |
| 1838 | struct resource_entry *resource_list_create_entry(struct resource *res, |
| 1839 | size_t extra_size) |
| 1840 | { |
| 1841 | struct resource_entry *entry; |
| 1842 | |
| 1843 | entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); |
| 1844 | if (entry) { |
| 1845 | INIT_LIST_HEAD(&entry->node); |
| 1846 | entry->res = res ? res : &entry->__res; |
| 1847 | } |
| 1848 | |
| 1849 | return entry; |
| 1850 | } |
| 1851 | EXPORT_SYMBOL(resource_list_create_entry); |
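
/*
 * A minimal sketch of building and tearing down a resource list (the
 * values are illustrative):
 *
 *	LIST_HEAD(list);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	*entry->res = DEFINE_RES_MEM(0x1000, 0x100);
 *	resource_list_add_tail(entry, &list);
 *	...
 *	resource_list_free(&list);
 */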
| 1852 | |
| 1853 | void resource_list_free(struct list_head *head) |
| 1854 | { |
| 1855 | struct resource_entry *entry, *tmp; |
| 1856 | |
| 1857 | list_for_each_entry_safe(entry, tmp, head, node) |
| 1858 | resource_list_destroy_entry(entry); |
| 1859 | } |
| 1860 | EXPORT_SYMBOL(resource_list_free); |
| 1861 | |
| 1862 | #ifdef CONFIG_GET_FREE_REGION |
| 1863 | #define GFR_DESCENDING (1UL << 0) |
| 1864 | #define GFR_REQUEST_REGION (1UL << 1) |
| 1865 | #ifdef PA_SECTION_SHIFT |
| 1866 | #define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT) |
| 1867 | #else |
| 1868 | #define GFR_DEFAULT_ALIGN PAGE_SIZE |
| 1869 | #endif |
| 1870 | |
| 1871 | static resource_size_t gfr_start(struct resource *base, resource_size_t size, |
| 1872 | resource_size_t align, unsigned long flags) |
| 1873 | { |
| 1874 | if (flags & GFR_DESCENDING) { |
| 1875 | resource_size_t end; |
| 1876 | |
| 1877 | end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END); |
| 1878 | return end - size + 1; |
| 1879 | } |
| 1880 | |
| 1881 | return ALIGN(max(base->start, align), align); |
| 1882 | } |
| 1883 | |
| 1884 | static bool gfr_continue(struct resource *base, resource_size_t addr, |
| 1885 | resource_size_t size, unsigned long flags) |
| 1886 | { |
| 1887 | if (flags & GFR_DESCENDING) |
| 1888 | return addr > size && addr >= base->start; |
| 1889 | /* |
	 * In the ascending case, be careful that the last increment by
	 * @size did not wrap past 0.
| 1892 | */ |
| 1893 | return addr > addr - size && |
| 1894 | addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END); |
| 1895 | } |
| 1896 | |
| 1897 | static resource_size_t gfr_next(resource_size_t addr, resource_size_t size, |
| 1898 | unsigned long flags) |
| 1899 | { |
| 1900 | if (flags & GFR_DESCENDING) |
| 1901 | return addr - size; |
| 1902 | return addr + size; |
| 1903 | } |
| 1904 | |
| 1905 | static void remove_free_mem_region(void *_res) |
| 1906 | { |
| 1907 | struct resource *res = _res; |
| 1908 | |
| 1909 | if (res->parent) |
| 1910 | remove_resource(res); |
| 1911 | free_resource(res); |
| 1912 | } |
| 1913 | |
| 1914 | static struct resource * |
| 1915 | get_free_mem_region(struct device *dev, struct resource *base, |
| 1916 | resource_size_t size, const unsigned long align, |
| 1917 | const char *name, const unsigned long desc, |
| 1918 | const unsigned long flags) |
| 1919 | { |
| 1920 | resource_size_t addr; |
| 1921 | struct resource *res; |
| 1922 | struct region_devres *dr = NULL; |
| 1923 | |
| 1924 | size = ALIGN(size, align); |
| 1925 | |
| 1926 | res = alloc_resource(GFP_KERNEL); |
| 1927 | if (!res) |
| 1928 | return ERR_PTR(-ENOMEM); |
| 1929 | |
| 1930 | if (dev && (flags & GFR_REQUEST_REGION)) { |
| 1931 | dr = devres_alloc(devm_region_release, |
| 1932 | sizeof(struct region_devres), GFP_KERNEL); |
| 1933 | if (!dr) { |
| 1934 | free_resource(res); |
| 1935 | return ERR_PTR(-ENOMEM); |
| 1936 | } |
| 1937 | } else if (dev) { |
| 1938 | if (devm_add_action_or_reset(dev, remove_free_mem_region, res)) |
| 1939 | return ERR_PTR(-ENOMEM); |
| 1940 | } |
| 1941 | |
| 1942 | write_lock(&resource_lock); |
| 1943 | for (addr = gfr_start(base, size, align, flags); |
	     gfr_continue(base, addr, size, flags);
	     addr = gfr_next(addr, size, flags)) {
| 1946 | if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) != |
| 1947 | REGION_DISJOINT) |
| 1948 | continue; |
| 1949 | |
| 1950 | if (flags & GFR_REQUEST_REGION) { |
| 1951 | if (__request_region_locked(res, &iomem_resource, addr, |
| 1952 | size, name, 0)) |
| 1953 | break; |
| 1954 | |
| 1955 | if (dev) { |
| 1956 | dr->parent = &iomem_resource; |
| 1957 | dr->start = addr; |
| 1958 | dr->n = size; |
| 1959 | devres_add(dev, dr); |
| 1960 | } |
| 1961 | |
| 1962 | res->desc = desc; |
			write_unlock(&resource_lock);

| 1966 | /* |
| 1967 | * A driver is claiming this region so revoke any |
| 1968 | * mappings. |
| 1969 | */ |
| 1970 | revoke_iomem(res); |
| 1971 | } else { |
| 1972 | *res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc); |
| 1973 | |
| 1974 | /* |
| 1975 | * Only succeed if the resource hosts an exclusive |
| 1976 | * range after the insert |
| 1977 | */ |
| 1978 | if (__insert_resource(base, res) || res->child) |
| 1979 | break; |
| 1980 | |
| 1981 | write_unlock(&resource_lock); |
| 1982 | } |
| 1983 | |
| 1984 | return res; |
| 1985 | } |
| 1986 | write_unlock(&resource_lock); |
| 1987 | |
| 1988 | if (flags & GFR_REQUEST_REGION) { |
| 1989 | free_resource(res); |
| 1990 | devres_free(dr); |
| 1991 | } else if (dev) |
| 1992 | devm_release_action(dev, remove_free_mem_region, res); |
| 1993 | |
| 1994 | return ERR_PTR(-ERANGE); |
| 1995 | } |
| 1996 | |
| 1997 | /** |
| 1998 | * devm_request_free_mem_region - find free region for device private memory |
| 1999 | * |
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical addresses big
 * enough to contain the new resource, so that it can later be hotplugged
 * as ZONE_DEVICE memory, which in turn allocates struct pages.
 *
 * Return: the new resource on success, or an ERR_PTR() encoded error on
 * failure.
| 2007 | */ |
| 2008 | struct resource *devm_request_free_mem_region(struct device *dev, |
| 2009 | struct resource *base, unsigned long size) |
| 2010 | { |
| 2011 | unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION; |
| 2012 | |
| 2013 | return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN, |
| 2014 | dev_name(dev), |
| 2015 | IORES_DESC_DEVICE_PRIVATE_MEMORY, flags); |
| 2016 | } |
| 2017 | EXPORT_SYMBOL_GPL(devm_request_free_mem_region); |
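
/*
 * A minimal sketch of a ZONE_DEVICE user, given hypothetical "res" and
 * "pgmap" variables (the dev_pagemap wiring is illustrative, not part of
 * this file):
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *
 * followed by a call such as devm_memremap_pages(dev, pgmap).
 */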
| 2018 | |
| 2019 | struct resource *request_free_mem_region(struct resource *base, |
| 2020 | unsigned long size, const char *name) |
| 2021 | { |
| 2022 | unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION; |
| 2023 | |
| 2024 | return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name, |
| 2025 | IORES_DESC_DEVICE_PRIVATE_MEMORY, flags); |
| 2026 | } |
| 2027 | EXPORT_SYMBOL_GPL(request_free_mem_region); |
| 2028 | |
| 2029 | /** |
| 2030 | * alloc_free_mem_region - find a free region relative to @base |
| 2031 | * @base: resource that will parent the new resource |
| 2032 | * @size: size in bytes of memory to allocate from @base |
| 2033 | * @align: alignment requirements for the allocation |
| 2034 | * @name: resource name |
| 2035 | * |
| 2036 | * Buses like CXL, that can dynamically instantiate new memory regions, |
| 2037 | * need a method to allocate physical address space for those regions. |
 * Allocate and insert a new resource to cover a free range in the span
 * of @base that is not claimed by any descendant of @base.
| 2040 | */ |
| 2041 | struct resource *alloc_free_mem_region(struct resource *base, |
| 2042 | unsigned long size, unsigned long align, |
| 2043 | const char *name) |
| 2044 | { |
| 2045 | /* Default of ascending direction and insert resource */ |
| 2046 | unsigned long flags = 0; |
| 2047 | |
| 2048 | return get_free_mem_region(NULL, base, size, align, name, |
| 2049 | IORES_DESC_NONE, flags); |
| 2050 | } |
| 2051 | EXPORT_SYMBOL_GPL(alloc_free_mem_region); |
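
/*
 * A minimal sketch with illustrative values: carve a 256M, 256M-aligned
 * range out of a hypothetical host bridge window for a new region:
 *
 *	res = alloc_free_mem_region(&hpa_window, SZ_256M, SZ_256M,
 *				    "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * On success the new resource is already inserted under @base; the
 * caller is responsible for eventually removing and freeing it.
 */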
| 2052 | #endif /* CONFIG_GET_FREE_REGION */ |
| 2053 | |
| 2054 | static int __init strict_iomem(char *str) |
| 2055 | { |
| 2056 | if (strstr(str, "relaxed")) |
| 2057 | strict_iomem_checks = 0; |
| 2058 | if (strstr(str, "strict")) |
| 2059 | strict_iomem_checks = 1; |
| 2060 | return 1; |
| 2061 | } |
| 2062 | |
| 2063 | static int iomem_fs_init_fs_context(struct fs_context *fc) |
| 2064 | { |
| 2065 | return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM; |
| 2066 | } |
| 2067 | |
| 2068 | static struct file_system_type iomem_fs_type = { |
| 2069 | .name = "iomem", |
| 2070 | .owner = THIS_MODULE, |
| 2071 | .init_fs_context = iomem_fs_init_fs_context, |
| 2072 | .kill_sb = kill_anon_super, |
| 2073 | }; |
| 2074 | |
| 2075 | static int __init iomem_init_inode(void) |
| 2076 | { |
| 2077 | static struct vfsmount *iomem_vfs_mount; |
| 2078 | static int iomem_fs_cnt; |
| 2079 | struct inode *inode; |
| 2080 | int rc; |
| 2081 | |
| 2082 | rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt); |
| 2083 | if (rc < 0) { |
| 2084 | pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc); |
| 2085 | return rc; |
| 2086 | } |
| 2087 | |
| 2088 | inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb); |
| 2089 | if (IS_ERR(inode)) { |
| 2090 | rc = PTR_ERR(inode); |
| 2091 | pr_err("Cannot allocate inode for iomem: %d\n", rc); |
| 2092 | simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt); |
| 2093 | return rc; |
| 2094 | } |
| 2095 | |
| 2096 | /* |
	 * Publish the iomem revocation inode only once it is fully
	 * initialized.
| 2098 | * Pairs with smp_load_acquire() in revoke_iomem(). |
| 2099 | */ |
| 2100 | smp_store_release(&iomem_inode, inode); |
| 2101 | |
| 2102 | return 0; |
| 2103 | } |
| 2104 | |
| 2105 | fs_initcall(iomem_init_inode); |
| 2106 | |
| 2107 | __setup("iomem=", strict_iomem); |