// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>
#include "pid_sysctl.h"

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* Write-once array, filled starting at index 0. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * create_pid_cachep - create the kmem cache that pids of the given
 * namespace nesting level are allocated from.
 * @level: pid namespace level
 */
static struct kmem_cache *create_pid_cachep(unsigned int level)
{
	/* Level 0 is init_pid_ns.pid_cachep */
	struct kmem_cache **pkc = &pid_cache[level - 1];
	struct kmem_cache *kc;
	char name[4 + 10 + 1];
	unsigned int len;

	kc = READ_ONCE(*pkc);
	if (kc)
		return kc;

	snprintf(name, sizeof(name), "pid_%u", level + 1);
	len = sizeof(struct pid) + level * sizeof(struct upid);
	mutex_lock(&pid_caches_mutex);
	/* A possible name collision forces us to allocate under the mutex. */
	if (!*pkc)
		*pkc = kmem_cache_create(name, len, 0,
					 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	mutex_unlock(&pid_caches_mutex);
	/* The current attempt can fail, but a concurrent caller may have succeeded. */
	return READ_ONCE(*pkc);
}
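
/*
 * Illustrative note (not compiled logic): a pid that lives in a level-N
 * namespace is visible at every level 0..N, so it needs N + 1 struct upid
 * entries. With the sizing rule above, the cache for level 2 is named
 * "pid_3" and its objects are sizeof(struct pid) + 2 * sizeof(struct upid)
 * bytes, the base struct pid already providing room for one upid. The
 * numbers here are only an example of the arithmetic used above and assume
 * this kernel version's struct pid layout.
 */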

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int err;

	err = -EINVAL;
	if (!in_userns(parent_pid_ns->user_ns, user_ns))
		goto out;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	idr_init(&ns->idr);

	ns->pid_cachep = create_pid_cachep(level);
	if (ns->pid_cachep == NULL)
		goto out_free_idr;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_idr;
	ns->ns.ops = &pidns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->pid_allocated = PIDNS_ADDING;

	initialize_memfd_noexec_scope(ns);

	return ns;

out_free_idr:
	idr_destroy(&ns->idr);
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	ns_free_inum(&ns->ns);

	idr_destroy(&ns->idr);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}
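
/*
 * Illustrative userspace sketch (not part of this file): copy_pid_ns() is
 * reached when a task clones or unshares with CLONE_NEWPID. A minimal
 * caller, assuming sufficient privilege (CAP_SYS_ADMIN in the caller's
 * user namespace), might look like:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/wait.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWPID))	// future children get a new pid ns
 *			return 1;
 *		pid_t child = fork();		// first child becomes init (pid 1) there
 *		if (child == 0) {
 *			printf("in-namespace pid: %d\n", (int)getpid());
 *			return 0;
 *		}
 *		waitpid(child, NULL, 0);
 *		return 0;
 *	}
 *
 * Note that unshare(CLONE_NEWPID) does not move the caller itself; only
 * its subsequently forked children are placed in the new namespace.
 */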

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!refcount_dec_and_test(&ns->ns.count))
			break;
		destroy_pid_namespace(ns);
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;
	struct pid *pid;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD so that any terminated children are autoreaped.
	 * This speeds up the namespace shutdown; see also the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the namespace's init thread group is
	 * terminating. Find the remaining pids in the namespace, signal
	 * them and wait for them to exit.
	 *
	 * Note: This signals each thread in the namespace - even those
	 *       that belong to the same thread group. To avoid this, we
	 *       would have to walk the entire tasklist looking for
	 *       processes in this namespace, but that could be
	 *       unnecessarily expensive if the pid namespace has just a
	 *       few processes. Or we would need to maintain a tasklist
	 *       for each pid namespace.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	nr = 2;
	idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
		task = pid_task(pid, PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * kernel_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = kernel_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE
	 * processes whose parents are outside of the pid namespace. Such
	 * processes are created with setns()+fork().
	 *
	 * If those EXIT_ZOMBIE processes are not reaped by their
	 * parents before their parents exit, they will be reparented
	 * to pid_ns->child_reaper. Thus pid_ns->child_reaper needs to
	 * stay valid until they all go away.
	 *
	 * The code relies on pid_ns->child_reaper ignoring
	 * SIGCHLD to cause those EXIT_ZOMBIE processes to be
	 * autoreaped if reparented.
	 *
	 * Semantically it is also desirable to wait for EXIT_ZOMBIE
	 * processes before allowing the child_reaper to be reaped, as
	 * that gives the invariant that when the init process of a
	 * pid namespace is reaped all of the processes in the pid
	 * namespace are gone.
	 *
	 * Once all of the other tasks are gone from the pid_namespace,
	 * free_pid() will awaken this task.
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		/*
		 * Release tasks_rcu_exit_srcu to avoid the following deadlock:
		 *
		 * 1) TASK A unshare(CLONE_NEWPID)
		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
		 *    and TASK C
		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
		 * 4) TASK A calls synchronize_rcu_tasks()
		 *    -> synchronize_srcu(tasks_rcu_exit_srcu)
		 * 5) *DEADLOCK*
		 *
		 * It is considered safe to release tasks_rcu_exit_srcu here
		 * because we assume the current task cannot be concurrently
		 * reaped at this point.
		 */
		exit_tasks_rcu_stop();
		schedule();
		exit_tasks_rcu_start();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;
	int ret, next;

	if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns))
		return -EPERM;

	/*
	 * Writing directly to the namespace's last_pid field is OK, since
	 * this field is volatile in a living namespace anyway, and any code
	 * writing to it must synchronize its use by external means.
	 */

	next = idr_get_cursor(&pid_ns->idr) - 1;

	tmp.data = &next;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (!ret && write)
		idr_set_cursor(&pid_ns->idr, next + 1);

	return ret;
}

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &pid_max,
	},
	{ }
};
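
/*
 * Illustrative userspace sketch (not part of this file): the ns_last_pid
 * sysctl lets a checkpoint/restore tool such as CRIU steer which pid the
 * next fork() in this namespace receives. A caller with
 * CAP_CHECKPOINT_RESTORE (or CAP_SYS_ADMIN) over the namespace's user
 * namespace could do, roughly:
 *
 *	// write desired_pid - 1, then fork; the child should get desired_pid,
 *	// assuming no other task in the namespace forks in between
 *	FILE *f = fopen("/proc/sys/kernel/ns_last_pid", "w");
 *	fprintf(f, "%d", desired_pid - 1);
 *	fclose(f);
 *	pid_t child = fork();
 *
 * desired_pid is a placeholder. The value written becomes the idr cursor,
 * so the next pid allocation starts searching at desired_pid.
 */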
#endif /* CONFIG_CHECKPOINT_RESTORE */

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	send_sig(SIGKILL, pid_ns->child_reaper, 1);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
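
/*
 * Illustrative note (not compiled logic): the signal number stashed in
 * pid_ns->reboot above ends up in the namespace init's group_exit_code
 * (see zap_pid_ns_processes()), so a parent in the outer namespace that
 * waits on the namespace init can tell a restart request from a halt
 * request. A rough sketch of such a parent:
 *
 *	int status;
 *	waitpid(ns_init_pid, &status, 0);
 *	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGHUP)
 *		;	// the namespace asked for a restart
 *	else if (WIFSIGNALED(status) && WTERMSIG(status) == SIGINT)
 *		;	// the namespace asked for halt/power-off
 *
 * ns_init_pid is a placeholder for the pid of the namespace's init as
 * seen by the parent.
 */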

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
	struct pid_namespace *ns = NULL;

	task_lock(task);
	if (task->nsproxy) {
		ns = task->nsproxy->pid_ns_for_children;
		get_pid_ns(ns);
	}
	task_unlock(task);

	if (ns) {
		read_lock(&tasklist_lock);
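		/*
		 * The namespace may have been created by unshare(CLONE_NEWPID)
		 * but not yet populated by a first fork(); in that case it has
		 * no init (child_reaper) yet, so report it as absent rather
		 * than handing out a reference.
		 */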
		if (!ns->child_reaper) {
			put_pid_ns(ns);
			ns = NULL;
		}
		read_unlock(&tasklist_lock);
	}

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value, and it
	 * maintains the property that processes and their children cannot
	 * escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}
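
/*
 * Illustrative userspace sketch (not part of this file): pidns_install()
 * is reached via setns(2) on a pid namespace file descriptor. The caller
 * itself keeps its own pid namespace; only children forked afterwards
 * land in the target namespace:
 *
 *	int fd = open("/proc/1234/ns/pid", O_RDONLY);	// 1234 is a placeholder pid
 *	if (fd >= 0 && setns(fd, CLONE_NEWPID) == 0) {
 *		pid_t child = fork();
 *		if (child == 0) {
 *			// runs as a member of the target pid namespace
 *		}
 *	}
 *
 * Both CAP_SYS_ADMIN checks above must pass, and the target must be the
 * caller's active pid namespace or a descendant of it.
 */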

static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}
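
/*
 * Illustrative note (not compiled logic): userspace reaches this through
 * the NS_GET_PARENT ioctl on a pid namespace file descriptor, e.g.
 *
 *	#include <linux/nsfs.h>
 *	int parent_fd = ioctl(ns_fd, NS_GET_PARENT);
 *
 * where ns_fd was opened from /proc/<pid>/ns/pid. It fails with EPERM
 * unless the parent namespace is reachable from the caller's active pid
 * namespace.
 */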

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
	.name		= "pid_for_children",
	.real_ns_name	= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_for_children_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC | SLAB_ACCOUNT);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_init("kernel", pid_ns_ctl_table);
#endif

	register_pid_ns_sysctl_table_vm();
	return 0;
}

__initcall(pid_namespaces_init);