/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from storing them in the
 * per-namespace IDR and parking tasks using given IDs on per-type lists.
 *
 * The task lists hanging off a struct pid are only changed with the
 * tasklist_lock write-acquired and are only traversed with the
 * tasklist_lock at least read-acquired, so there's no additional SMP
 * locking needed for them here.
 *
 * PID numbers are handed out by a per-namespace IDR, protected by
 * pidmap_lock. Allocation is cyclic (idr_alloc_cyclic()), so a freed PID
 * is not reused until the PID space wraps around at pid_max. Lookup via
 * find_pid_ns() is lockless and runs under rcu_read_lock().
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

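/*
 * The statically allocated struct pid for PID 0, used by the boot
 * idle (swapper) task before the regular allocator is available.
 */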
struct pid init_struct_pid = {
	.count		= ATOMIC_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * The initial PID namespace. Its IDR grows on demand, so a low pid_max
 * value does not cost memory up front, yet the scheme still scales up
 * to 4 million PIDs (PID_MAX_LIMIT) at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another CPU that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
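/*
 * A minimal sketch of the interleaving guarded against above
 * (hypothetical CPUs A and B):
 *
 *	CPU A					CPU B
 *	write_lock_irq(&tasklist_lock);
 *						spin_lock(&pidmap_lock);
 *	detach_pid()->free_pid():		<interrupt>
 *	  spins on pidmap_lock			  read_lock(&tasklist_lock);
 *						  spins on tasklist_lock
 *
 * Neither CPU can make progress, hence the irq-disabling variants
 * used on pidmap_lock throughout this file.
 */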

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
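	/*
	 * Fast path: if we observe a count of one we hold the last
	 * reference, so no atomic read-modify-write is needed. New
	 * references are only taken via lookups, which are impossible
	 * once the initial allocation reference has been dropped (that
	 * happens after idr_remove() plus an RCU grace period).
	 */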
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

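	/*
	 * Concurrent RCU lookups may still hold a pointer to this pid,
	 * so defer dropping the final (allocation-time) reference until
	 * a grace period has elapsed.
	 */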
	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int pid_min = 1;

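		/*
		 * Preallocate IDR nodes outside the lock so that the
		 * GFP_ATOMIC allocation done under it is unlikely to
		 * fail for lack of memory.
		 */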
		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		/*
		 * init really needs pid 1, but after reaching the maximum
		 * wrap back to RESERVED_PIDS
		 */
		if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
			pid_min = RESERVED_PIDS;

		/*
		 * Store a null pointer so find_pid_ns does not find
		 * a partially initialized PID (see below).
		 */
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
				      pid_max, GFP_ATOMIC);
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}

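/*
 * Stop new pids from being allocated in @ns; used during namespace
 * teardown (see zap_pid_ns_processes()) so that no new task can enter
 * the namespace behind the exiting child reaper.
 */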
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);
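/*
 * find_pid_ns() returns an unreferenced pid: the result is only
 * stable while rcu_read_lock() is held, so take a reference before
 * dropping it. A minimal usage sketch (hypothetical caller):
 *
 *	struct pid *pid;
 *
 *	rcu_read_lock();
 *	pid = get_pid(find_pid_ns(nr, ns));
 *	rcu_read_unlock();
 *	...
 *	put_pid(pid);
 */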

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

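/*
 * pid_task() returns the first task using @pid as an ID of @type, or
 * NULL. The result is not reference-counted: callers must be under
 * rcu_read_lock() or hold the tasklist_lock, and should take a
 * reference (see get_pid_task()) before dropping that protection.
 */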
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
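/*
 * Usage sketch (hypothetical caller): a counted pid reference outlives
 * the task it was taken from and can be stashed and resolved later,
 * unlike a raw pid_t which may be recycled:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	struct task_struct *tsk = get_pid_task(pid, PIDTYPE_PID);
 *	if (tsk) {
 *		... use tsk ...
 *		put_task_struct(tsk);
 *	}
 *	put_pid(pid);
 */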

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
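/*
 * Worked example (hypothetical nesting): a task in a level-1 pid
 * namespace has two upids, say nr 1042 at level 0 (init_pid_ns) and
 * nr 7 at level 1. pid_nr_ns(pid, &init_pid_ns) yields 1042 and
 * pid_nr_ns() on the level-1 namespace yields 7, while asking any
 * unrelated or deeper namespace yields 0.
 */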

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task)))
		nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
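/*
 * Usage sketch (hypothetical caller): the number a task is known by
 * depends on the observer's namespace; passing a NULL @ns means "the
 * caller's active pid namespace":
 *
 *	pid_t global = __task_pid_nr_ns(task, PIDTYPE_PID, &init_pid_ns);
 *	pid_t local  = __task_pid_nr_ns(task, PIDTYPE_PID, NULL);
 */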

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
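/*
 * Iteration sketch (hypothetical caller, mirroring how proc walks the
 * pid space):
 *
 *	struct pid *pid;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *		... visit pid ...
 *		nr = pid_nr_ns(pid, ns) + 1;
 *	}
 *	rcu_read_unlock();
 */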

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}