Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Generic pidhash and scalable, time-bounded PID allocator | |
3 | * | |
4 | * (C) 2002-2003 William Irwin, IBM | |
5 | * (C) 2004 William Irwin, Oracle | |
6 | * (C) 2002-2004 Ingo Molnar, Red Hat | |
7 | * | |
8 | * pid-structures are backing objects for tasks sharing a given ID to chain | |
9 | * against. There is very little to them aside from hashing them and | |
10 | * parking tasks using given IDs on a list. | |
11 | * | |
12 | * The hash is always changed with the tasklist_lock write-acquired, | |
13 | * and the hash is only accessed with the tasklist_lock at least | |
14 | * read-acquired, so there's no additional SMP locking needed here. | |
15 | * | |
16 | * We have a list of bitmap pages, whose bitmaps represent the PID space. | |
17 | * Allocating and freeing PIDs is completely lockless. The worst-case | |
18 | * allocation scenario, when all but one of the 1 million possible PIDs are | |
19 | * already allocated, costs a scan of 32 list entries and at most PAGE_SIZE | |
20 | * bytes. The typical fastpath is a single successful set_bit. Freeing is O(1). | |
30e49c26 PE |
21 | * |
22 | * Pid namespaces: | |
23 | * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. | |
24 | * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM | |
25 | * Many thanks to Oleg Nesterov for comments and help | |
26 | * | |
1da177e4 LT |
27 | */ |
28 | ||
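To make the bitmap scheme described in the header comment concrete, the sketch below is a minimal userspace analogue of a single pidmap page: allocation scans for a zero bit and sets it, freeing clears one bit. All `demo_*` names are invented for illustration; the real code below uses atomic `test_and_set_bit()`/`clear_bit()` on lazily allocated pages.

```c
/* Minimal userspace sketch of one pidmap page (illustrative, not kernel code). */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096
#define DEMO_BITS      (DEMO_PAGE_SIZE * 8)   /* BITS_PER_PAGE analogue */
#define WORD_BITS      (8 * (int)sizeof(unsigned long))

static unsigned long demo_map[DEMO_BITS / (8 * sizeof(unsigned long))];

/* Allocate the lowest free id at or above 'start'; -1 if the page is full. */
static int demo_alloc(int start)
{
	int off;

	for (off = start; off < DEMO_BITS; off++) {
		unsigned long *word = &demo_map[off / WORD_BITS];
		unsigned long bit = 1UL << (off % WORD_BITS);

		if (!(*word & bit)) {		/* test_and_set_bit() analogue */
			*word |= bit;
			return off;
		}
	}
	return -1;
}

/* Freeing is O(1): just clear the bit, as free_pidmap() does below. */
static void demo_free(int id)
{
	demo_map[id / WORD_BITS] &= ~(1UL << (id % WORD_BITS));
}

int main(void)
{
	int a = demo_alloc(300);	/* RESERVED_PIDS-style lower bound */
	int b = demo_alloc(300);

	printf("allocated %d and %d\n", a, b);
	demo_free(a);
	printf("after freeing %d, next allocation gives %d\n", a, demo_alloc(300));
	return 0;
}
```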
29 | #include <linux/mm.h> | |
30 | #include <linux/module.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/bootmem.h> | |
34 | #include <linux/hash.h> | |
61a58c6c | 35 | #include <linux/pid_namespace.h> |
820e45db | 36 | #include <linux/init_task.h> |
3eb07c8c | 37 | #include <linux/syscalls.h> |
1da177e4 | 38 | |
8ef047aa PE |
39 | #define pid_hashfn(nr, ns) \ |
40 | hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) | |
92476d7f | 41 | static struct hlist_head *pid_hash; |
1da177e4 | 42 | static int pidhash_shift; |
820e45db | 43 | struct pid init_struct_pid = INIT_STRUCT_PID; |
c9c5d922 | 44 | static struct kmem_cache *pid_ns_cachep; |
1da177e4 LT |
45 | |
46 | int pid_max = PID_MAX_DEFAULT; | |
1da177e4 LT |
47 | |
48 | #define RESERVED_PIDS 300 | |
49 | ||
50 | int pid_max_min = RESERVED_PIDS + 1; | |
51 | int pid_max_max = PID_MAX_LIMIT; | |
52 | ||
1da177e4 LT |
53 | #define BITS_PER_PAGE (PAGE_SIZE*8) |
54 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) | |
3fbc9648 | 55 | |
61a58c6c SB |
56 | static inline int mk_pid(struct pid_namespace *pid_ns, |
57 | struct pidmap *map, int off) | |
3fbc9648 | 58 | { |
61a58c6c | 59 | return (map - pid_ns->pidmap)*BITS_PER_PAGE + off; |
3fbc9648 SB |
60 | } |
61 | ||
1da177e4 LT |
62 | #define find_next_offset(map, off) \ |
63 | find_next_zero_bit((map)->page, BITS_PER_PAGE, off) | |
64 | ||
65 | /* | |
66 | * PID-map pages start out as NULL; they get allocated upon | |
67 | * first use and are never deallocated. This way a low pid_max | |
68 | * value does not cause lots of bitmaps to be allocated, but | |
69 | * the scheme still scales up to 4 million PIDs at runtime. | |
70 | */ | |
61a58c6c | 71 | struct pid_namespace init_pid_ns = { |
9a575a92 CLG |
72 | .kref = { |
73 | .refcount = ATOMIC_INIT(2), | |
74 | }, | |
3fbc9648 SB |
75 | .pidmap = { |
76 | [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } | |
77 | }, | |
84d73786 | 78 | .last_pid = 0, |
faacbfd3 PE |
79 | .level = 0, |
80 | .child_reaper = &init_task, | |
3fbc9648 | 81 | }; |
198fe21b | 82 | EXPORT_SYMBOL_GPL(init_pid_ns); |
1da177e4 | 83 | |
b461cc03 | 84 | int is_container_init(struct task_struct *tsk) |
b460cbc5 | 85 | { |
b461cc03 PE |
86 | int ret = 0; |
87 | struct pid *pid; | |
88 | ||
89 | rcu_read_lock(); | |
90 | pid = task_pid(tsk); | |
91 | if (pid != NULL && pid->numbers[pid->level].nr == 1) | |
92 | ret = 1; | |
93 | rcu_read_unlock(); | |
94 | ||
95 | return ret; | |
b460cbc5 | 96 | } |
b461cc03 | 97 | EXPORT_SYMBOL(is_container_init); |
b460cbc5 | 98 | |
92476d7f EB |
99 | /* |
100 | * Note: disable interrupts while the pidmap_lock is held as an | |
101 | * interrupt might come in and do read_lock(&tasklist_lock). | |
102 | * | |
103 | * If we don't disable interrupts, there is a nasty deadlock between | |
104 | * detach_pid()->free_pid() and another CPU that does | |
105 | * spin_lock(&pidmap_lock) followed by an interrupt routine that does | |
106 | * read_lock(&tasklist_lock). | |
107 | * | |
108 | * After we clean up the tasklist_lock and know there are no | |
109 | * irq handlers that take it, we can leave interrupts enabled. | |
110 | * For now it is easier to be safe than to prove it can't happen. | |
111 | */ | |
3fbc9648 | 112 | |
1da177e4 LT |
113 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); |
114 | ||
61a58c6c | 115 | static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid) |
1da177e4 | 116 | { |
61a58c6c | 117 | struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE; |
1da177e4 LT |
118 | int offset = pid & BITS_PER_PAGE_MASK; |
119 | ||
120 | clear_bit(offset, map->page); | |
121 | atomic_inc(&map->nr_free); | |
122 | } | |
123 | ||
61a58c6c | 124 | static int alloc_pidmap(struct pid_namespace *pid_ns) |
1da177e4 | 125 | { |
61a58c6c | 126 | int i, offset, max_scan, pid, last = pid_ns->last_pid; |
6a1f3b84 | 127 | struct pidmap *map; |
1da177e4 LT |
128 | |
129 | pid = last + 1; | |
130 | if (pid >= pid_max) | |
131 | pid = RESERVED_PIDS; | |
132 | offset = pid & BITS_PER_PAGE_MASK; | |
61a58c6c | 133 | map = &pid_ns->pidmap[pid/BITS_PER_PAGE]; |
1da177e4 LT |
134 | max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset; |
135 | for (i = 0; i <= max_scan; ++i) { | |
136 | if (unlikely(!map->page)) { | |
3fbc9648 | 137 | void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); |
1da177e4 LT |
138 | /* |
139 | * Free the page if someone raced with us | |
140 | * installing it: | |
141 | */ | |
92476d7f | 142 | spin_lock_irq(&pidmap_lock); |
1da177e4 | 143 | if (map->page) |
3fbc9648 | 144 | kfree(page); |
1da177e4 | 145 | else |
3fbc9648 | 146 | map->page = page; |
92476d7f | 147 | spin_unlock_irq(&pidmap_lock); |
1da177e4 LT |
148 | if (unlikely(!map->page)) |
149 | break; | |
150 | } | |
151 | if (likely(atomic_read(&map->nr_free))) { | |
152 | do { | |
153 | if (!test_and_set_bit(offset, map->page)) { | |
154 | atomic_dec(&map->nr_free); | |
61a58c6c | 155 | pid_ns->last_pid = pid; |
1da177e4 LT |
156 | return pid; |
157 | } | |
158 | offset = find_next_offset(map, offset); | |
61a58c6c | 159 | pid = mk_pid(pid_ns, map, offset); |
1da177e4 LT |
160 | /* |
161 | * find_next_offset() found a bit, the pid from it | |
162 | * is in-bounds, and if we fell back to the last | |
163 | * bitmap block and the final block was the same | |
164 | * as the starting point, pid is before last_pid. | |
165 | */ | |
166 | } while (offset < BITS_PER_PAGE && pid < pid_max && | |
167 | (i != max_scan || pid < last || | |
168 | !((last+1) & BITS_PER_PAGE_MASK))); | |
169 | } | |
61a58c6c | 170 | if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { |
1da177e4 LT |
171 | ++map; |
172 | offset = 0; | |
173 | } else { | |
61a58c6c | 174 | map = &pid_ns->pidmap[0]; |
1da177e4 LT |
175 | offset = RESERVED_PIDS; |
176 | if (unlikely(last == offset)) | |
177 | break; | |
178 | } | |
61a58c6c | 179 | pid = mk_pid(pid_ns, map, offset); |
1da177e4 LT |
180 | } |
181 | return -1; | |
182 | } | |
183 | ||
61a58c6c | 184 | static int next_pidmap(struct pid_namespace *pid_ns, int last) |
0804ef4b EB |
185 | { |
186 | int offset; | |
f40f50d3 | 187 | struct pidmap *map, *end; |
0804ef4b EB |
188 | |
189 | offset = (last + 1) & BITS_PER_PAGE_MASK; | |
61a58c6c SB |
190 | map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; |
191 | end = &pid_ns->pidmap[PIDMAP_ENTRIES]; | |
f40f50d3 | 192 | for (; map < end; map++, offset = 0) { |
0804ef4b EB |
193 | if (unlikely(!map->page)) |
194 | continue; | |
195 | offset = find_next_bit((map)->page, BITS_PER_PAGE, offset); | |
196 | if (offset < BITS_PER_PAGE) | |
61a58c6c | 197 | return mk_pid(pid_ns, map, offset); |
0804ef4b EB |
198 | } |
199 | return -1; | |
200 | } | |
201 | ||
92476d7f EB |
202 | fastcall void put_pid(struct pid *pid) |
203 | { | |
baf8f0f8 PE |
204 | struct pid_namespace *ns; |
205 | ||
92476d7f EB |
206 | if (!pid) |
207 | return; | |
baf8f0f8 | 208 | |
8ef047aa | 209 | ns = pid->numbers[pid->level].ns; |
92476d7f | 210 | if ((atomic_read(&pid->count) == 1) || |
8ef047aa | 211 | atomic_dec_and_test(&pid->count)) { |
baf8f0f8 | 212 | kmem_cache_free(ns->pid_cachep, pid); |
b461cc03 | 213 | put_pid_ns(ns); |
8ef047aa | 214 | } |
92476d7f | 215 | } |
bbf73147 | 216 | EXPORT_SYMBOL_GPL(put_pid); |
92476d7f EB |
217 | |
218 | static void delayed_put_pid(struct rcu_head *rhp) | |
219 | { | |
220 | struct pid *pid = container_of(rhp, struct pid, rcu); | |
221 | put_pid(pid); | |
222 | } | |
223 | ||
224 | fastcall void free_pid(struct pid *pid) | |
225 | { | |
226 | /* We can be called with write_lock_irq(&tasklist_lock) held */ | |
8ef047aa | 227 | int i; |
92476d7f EB |
228 | unsigned long flags; |
229 | ||
230 | spin_lock_irqsave(&pidmap_lock, flags); | |
198fe21b PE |
231 | for (i = 0; i <= pid->level; i++) |
232 | hlist_del_rcu(&pid->numbers[i].pid_chain); | |
92476d7f EB |
233 | spin_unlock_irqrestore(&pidmap_lock, flags); |
234 | ||
8ef047aa PE |
235 | for (i = 0; i <= pid->level; i++) |
236 | free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr); | |
237 | ||
92476d7f EB |
238 | call_rcu(&pid->rcu, delayed_put_pid); |
239 | } | |
240 | ||
8ef047aa | 241 | struct pid *alloc_pid(struct pid_namespace *ns) |
92476d7f EB |
242 | { |
243 | struct pid *pid; | |
244 | enum pid_type type; | |
8ef047aa PE |
245 | int i, nr; |
246 | struct pid_namespace *tmp; | |
198fe21b | 247 | struct upid *upid; |
92476d7f | 248 | |
baf8f0f8 | 249 | pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); |
92476d7f EB |
250 | if (!pid) |
251 | goto out; | |
252 | ||
8ef047aa PE |
253 | tmp = ns; |
254 | for (i = ns->level; i >= 0; i--) { | |
255 | nr = alloc_pidmap(tmp); | |
256 | if (nr < 0) | |
257 | goto out_free; | |
92476d7f | 258 | |
8ef047aa PE |
259 | pid->numbers[i].nr = nr; |
260 | pid->numbers[i].ns = tmp; | |
261 | tmp = tmp->parent; | |
262 | } | |
263 | ||
b461cc03 | 264 | get_pid_ns(ns); |
8ef047aa PE |
265 | pid->level = ns->level; |
266 | pid->nr = pid->numbers[0].nr; | |
92476d7f | 267 | atomic_set(&pid->count, 1); |
92476d7f EB |
268 | for (type = 0; type < PIDTYPE_MAX; ++type) |
269 | INIT_HLIST_HEAD(&pid->tasks[type]); | |
270 | ||
271 | spin_lock_irq(&pidmap_lock); | |
198fe21b PE |
272 | for (i = ns->level; i >= 0; i--) { |
273 | upid = &pid->numbers[i]; | |
274 | hlist_add_head_rcu(&upid->pid_chain, | |
275 | &pid_hash[pid_hashfn(upid->nr, upid->ns)]); | |
276 | } | |
92476d7f EB |
277 | spin_unlock_irq(&pidmap_lock); |
278 | ||
279 | out: | |
280 | return pid; | |
281 | ||
282 | out_free: | |
8ef047aa PE |
283 | for (i++; i <= ns->level; i++) |
284 | free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr); | |
285 | ||
baf8f0f8 | 286 | kmem_cache_free(ns->pid_cachep, pid); |
92476d7f EB |
287 | pid = NULL; |
288 | goto out; | |
289 | } | |
290 | ||
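The per-level bookkeeping that alloc_pid() fills in is easier to see in a small userspace sketch: a pid carries one (nr, ns) pair per namespace level, written from the task's own level down to the initial namespace, and read back by indexing with a namespace's level, as pid_nr_ns() does later in this file. The structures and numbers below are illustrative stand-ins, not the kernel's definitions.

```c
/* Userspace sketch of the struct pid / struct upid layering (illustrative). */
#include <stdio.h>

struct demo_ns { int level; };

struct demo_upid {
	int nr;			/* the id as seen in one namespace    */
	struct demo_ns *ns;	/* the namespace that number lives in */
};

struct demo_pid {
	int level;			/* deepest namespace level          */
	struct demo_upid numbers[3];	/* one entry per level, 0 = init ns */
};

/* pid_nr_ns() analogue: the number is only meaningful in a matching ns. */
static int demo_pid_nr_ns(struct demo_pid *pid, struct demo_ns *ns)
{
	if (pid && ns->level <= pid->level && pid->numbers[ns->level].ns == ns)
		return pid->numbers[ns->level].nr;
	return 0;
}

int main(void)
{
	struct demo_ns init_ns = { .level = 0 };
	struct demo_ns child_ns = { .level = 1 };

	/* A task created inside child_ns: pid 7312 globally, pid 4 inside. */
	struct demo_pid pid = {
		.level = 1,
		.numbers = {
			{ .nr = 7312, .ns = &init_ns  },
			{ .nr = 4,    .ns = &child_ns },
		},
	};

	printf("seen from init ns:  %d\n", demo_pid_nr_ns(&pid, &init_ns));
	printf("seen from child ns: %d\n", demo_pid_nr_ns(&pid, &child_ns));
	return 0;
}
```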
198fe21b | 291 | struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns) |
1da177e4 LT |
292 | { |
293 | struct hlist_node *elem; | |
198fe21b PE |
294 | struct upid *pnr; |
295 | ||
296 | hlist_for_each_entry_rcu(pnr, elem, | |
297 | &pid_hash[pid_hashfn(nr, ns)], pid_chain) | |
298 | if (pnr->nr == nr && pnr->ns == ns) | |
299 | return container_of(pnr, struct pid, | |
300 | numbers[ns->level]); | |
1da177e4 | 301 | |
1da177e4 LT |
302 | return NULL; |
303 | } | |
198fe21b | 304 | EXPORT_SYMBOL_GPL(find_pid_ns); |
1da177e4 | 305 | |
e713d0da SB |
306 | /* |
307 | * attach_pid() must be called with the tasklist_lock write-held. | |
308 | */ | |
309 | int fastcall attach_pid(struct task_struct *task, enum pid_type type, | |
310 | struct pid *pid) | |
1da177e4 | 311 | { |
92476d7f | 312 | struct pid_link *link; |
92476d7f | 313 | |
92476d7f | 314 | link = &task->pids[type]; |
e713d0da | 315 | link->pid = pid; |
92476d7f | 316 | hlist_add_head_rcu(&link->node, &pid->tasks[type]); |
1da177e4 LT |
317 | |
318 | return 0; | |
319 | } | |
320 | ||
36c8b586 | 321 | void fastcall detach_pid(struct task_struct *task, enum pid_type type) |
1da177e4 | 322 | { |
92476d7f EB |
323 | struct pid_link *link; |
324 | struct pid *pid; | |
325 | int tmp; | |
1da177e4 | 326 | |
92476d7f EB |
327 | link = &task->pids[type]; |
328 | pid = link->pid; | |
1da177e4 | 329 | |
92476d7f EB |
330 | hlist_del_rcu(&link->node); |
331 | link->pid = NULL; | |
1da177e4 | 332 | |
92476d7f EB |
333 | for (tmp = PIDTYPE_MAX; --tmp >= 0; ) |
334 | if (!hlist_empty(&pid->tasks[tmp])) | |
335 | return; | |
1da177e4 | 336 | |
92476d7f | 337 | free_pid(pid); |
1da177e4 LT |
338 | } |
339 | ||
c18258c6 EB |
340 | /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ |
341 | void fastcall transfer_pid(struct task_struct *old, struct task_struct *new, | |
342 | enum pid_type type) | |
343 | { | |
344 | new->pids[type].pid = old->pids[type].pid; | |
345 | hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); | |
346 | old->pids[type].pid = NULL; | |
347 | } | |
348 | ||
92476d7f | 349 | struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) |
1da177e4 | 350 | { |
92476d7f EB |
351 | struct task_struct *result = NULL; |
352 | if (pid) { | |
353 | struct hlist_node *first; | |
354 | first = rcu_dereference(pid->tasks[type].first); | |
355 | if (first) | |
356 | result = hlist_entry(first, struct task_struct, pids[(type)].node); | |
357 | } | |
358 | return result; | |
359 | } | |
1da177e4 | 360 | |
92476d7f EB |
361 | /* |
362 | * Must be called under rcu_read_lock() or with tasklist_lock read-held. | |
363 | */ | |
198fe21b PE |
364 | struct task_struct *find_task_by_pid_type_ns(int type, int nr, |
365 | struct pid_namespace *ns) | |
92476d7f | 366 | { |
198fe21b | 367 | return pid_task(find_pid_ns(nr, ns), type); |
92476d7f | 368 | } |
1da177e4 | 369 | |
198fe21b | 370 | EXPORT_SYMBOL(find_task_by_pid_type_ns); |
1da177e4 | 371 | |
228ebcbe PE |
372 | struct task_struct *find_task_by_pid(pid_t nr) |
373 | { | |
374 | return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns); | |
375 | } | |
376 | EXPORT_SYMBOL(find_task_by_pid); | |
377 | ||
378 | struct task_struct *find_task_by_vpid(pid_t vnr) | |
379 | { | |
380 | return find_task_by_pid_type_ns(PIDTYPE_PID, vnr, | |
381 | current->nsproxy->pid_ns); | |
382 | } | |
383 | EXPORT_SYMBOL(find_task_by_vpid); | |
384 | ||
385 | struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) | |
386 | { | |
387 | return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns); | |
388 | } | |
389 | EXPORT_SYMBOL(find_task_by_pid_ns); | |
390 | ||
1a657f78 ON |
391 | struct pid *get_task_pid(struct task_struct *task, enum pid_type type) |
392 | { | |
393 | struct pid *pid; | |
394 | rcu_read_lock(); | |
395 | pid = get_pid(task->pids[type].pid); | |
396 | rcu_read_unlock(); | |
397 | return pid; | |
398 | } | |
399 | ||
92476d7f EB |
400 | struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type) |
401 | { | |
402 | struct task_struct *result; | |
403 | rcu_read_lock(); | |
404 | result = pid_task(pid, type); | |
405 | if (result) | |
406 | get_task_struct(result); | |
407 | rcu_read_unlock(); | |
408 | return result; | |
1da177e4 LT |
409 | } |
410 | ||
92476d7f | 411 | struct pid *find_get_pid(pid_t nr) |
1da177e4 LT |
412 | { |
413 | struct pid *pid; | |
414 | ||
92476d7f | 415 | rcu_read_lock(); |
198fe21b | 416 | pid = get_pid(find_vpid(nr)); |
92476d7f | 417 | rcu_read_unlock(); |
1da177e4 | 418 | |
92476d7f | 419 | return pid; |
1da177e4 LT |
420 | } |
421 | ||
7af57294 PE |
422 | pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) |
423 | { | |
424 | struct upid *upid; | |
425 | pid_t nr = 0; | |
426 | ||
427 | if (pid && ns->level <= pid->level) { | |
428 | upid = &pid->numbers[ns->level]; | |
429 | if (upid->ns == ns) | |
430 | nr = upid->nr; | |
431 | } | |
432 | return nr; | |
433 | } | |
434 | ||
0804ef4b EB |
435 | /* |
436 | * Used by proc to find the first pid that is greater than or equal to nr. | |
437 | * | |
438 | * If there is a pid at nr this function is exactly the same as find_pid_ns. | |
439 | */ | |
198fe21b | 440 | struct pid *find_ge_pid(int nr, struct pid_namespace *ns) |
0804ef4b EB |
441 | { |
442 | struct pid *pid; | |
443 | ||
444 | do { | |
198fe21b | 445 | pid = find_pid_ns(nr, ns); |
0804ef4b EB |
446 | if (pid) |
447 | break; | |
198fe21b | 448 | nr = next_pidmap(ns, nr); |
0804ef4b EB |
449 | } while (nr > 0); |
450 | ||
451 | return pid; | |
452 | } | |
bbf73147 | 453 | EXPORT_SYMBOL_GPL(find_get_pid); |
0804ef4b | 454 | |
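find_ge_pid() exists so that a /proc-style reader can resume a pid walk at an arbitrary number without holding locks across the whole traversal. The fragment below is a hypothetical kernel-side sketch of such a walk written against the functions in this file; it is not taken from fs/proc and `visit` is an invented callback.

```c
/* Hypothetical helper: visit every task visible in @ns (sketch only). */
static void demo_walk_tasks(struct pid_namespace *ns,
			    void (*visit)(struct task_struct *))
{
	struct pid *pid;
	int nr = 1;

	rcu_read_lock();
	while ((pid = find_ge_pid(nr, ns)) != NULL) {
		struct task_struct *task = pid_task(pid, PIDTYPE_PID);

		if (task)
			visit(task);
		/* Resume just past the pid we found, as a proc readdir would. */
		nr = pid_nr_ns(pid, ns) + 1;
	}
	rcu_read_unlock();
}
```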
baf8f0f8 PE |
455 | struct pid_cache { |
456 | int nr_ids; | |
457 | char name[16]; | |
458 | struct kmem_cache *cachep; | |
459 | struct list_head list; | |
460 | }; | |
461 | ||
462 | static LIST_HEAD(pid_caches_lh); | |
463 | static DEFINE_MUTEX(pid_caches_mutex); | |
464 | ||
465 | /* | |
466 | * Creates the kmem cache to allocate pids from. | |
467 | * @nr_ids: the number of numerical ids a pid from this cache will carry | |
468 | */ | |
469 | ||
470 | static struct kmem_cache *create_pid_cachep(int nr_ids) | |
471 | { | |
472 | struct pid_cache *pcache; | |
473 | struct kmem_cache *cachep; | |
474 | ||
475 | mutex_lock(&pid_caches_mutex); | |
476 | list_for_each_entry (pcache, &pid_caches_lh, list) | |
477 | if (pcache->nr_ids == nr_ids) | |
478 | goto out; | |
479 | ||
480 | pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL); | |
481 | if (pcache == NULL) | |
482 | goto err_alloc; | |
483 | ||
484 | snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids); | |
485 | cachep = kmem_cache_create(pcache->name, | |
30e49c26 PE |
486 | sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid), |
487 | 0, SLAB_HWCACHE_ALIGN, NULL); | |
baf8f0f8 PE |
488 | if (cachep == NULL) |
489 | goto err_cachep; | |
490 | ||
491 | pcache->nr_ids = nr_ids; | |
492 | pcache->cachep = cachep; | |
493 | list_add(&pcache->list, &pid_caches_lh); | |
494 | out: | |
495 | mutex_unlock(&pid_caches_mutex); | |
496 | return pcache->cachep; | |
497 | ||
498 | err_cachep: | |
499 | kfree(pcache); | |
500 | err_alloc: | |
501 | mutex_unlock(&pid_caches_mutex); | |
502 | return NULL; | |
503 | } | |
504 | ||
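The per-depth caches exist because a pid that lives in a level-N namespace carries N+1 struct upid entries while struct pid declares only one, hence the object size of sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid) passed to kmem_cache_create() above. A userspace sketch of that arithmetic (the field layout here is invented; only the sizing formula matches):

```c
/* Illustrative sizing arithmetic only; the struct layout is made up. */
#include <stdio.h>

struct demo_upid { int nr; void *ns; };

struct demo_pid {
	int count;
	int level;
	void *tasks[3];
	struct demo_upid numbers[1];	/* grows with the namespace depth */
};

int main(void)
{
	int nr_ids;

	for (nr_ids = 1; nr_ids <= 3; nr_ids++)
		printf("pid_%d object size: %zu bytes\n", nr_ids,
		       sizeof(struct demo_pid) +
		       (nr_ids - 1) * sizeof(struct demo_upid));
	return 0;
}
```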
30e49c26 PE |
505 | static struct pid_namespace *create_pid_namespace(int level) |
506 | { | |
507 | struct pid_namespace *ns; | |
508 | int i; | |
509 | ||
c9c5d922 | 510 | ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL); |
30e49c26 PE |
511 | if (ns == NULL) |
512 | goto out; | |
513 | ||
514 | ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); | |
515 | if (!ns->pidmap[0].page) | |
516 | goto out_free; | |
517 | ||
518 | ns->pid_cachep = create_pid_cachep(level + 1); | |
519 | if (ns->pid_cachep == NULL) | |
520 | goto out_free_map; | |
521 | ||
522 | kref_init(&ns->kref); | |
523 | ns->last_pid = 0; | |
524 | ns->child_reaper = NULL; | |
525 | ns->level = level; | |
526 | ||
527 | set_bit(0, ns->pidmap[0].page); | |
528 | atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); | |
529 | ||
530 | for (i = 1; i < PIDMAP_ENTRIES; i++) { | |
531 | ns->pidmap[i].page = NULL; | |
532 | atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); | |
533 | } | |
534 | ||
535 | return ns; | |
536 | ||
537 | out_free_map: | |
538 | kfree(ns->pidmap[0].page); | |
539 | out_free: | |
c9c5d922 | 540 | kmem_cache_free(pid_ns_cachep, ns); |
30e49c26 PE |
541 | out: |
542 | return ERR_PTR(-ENOMEM); | |
543 | } | |
544 | ||
545 | static void destroy_pid_namespace(struct pid_namespace *ns) | |
546 | { | |
547 | int i; | |
548 | ||
549 | for (i = 0; i < PIDMAP_ENTRIES; i++) | |
550 | kfree(ns->pidmap[i].page); | |
c9c5d922 | 551 | kmem_cache_free(pid_ns_cachep, ns); |
30e49c26 PE |
552 | } |
553 | ||
213dd266 | 554 | struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns) |
9a575a92 | 555 | { |
30e49c26 PE |
556 | struct pid_namespace *new_ns; |
557 | ||
e3222c4e | 558 | BUG_ON(!old_ns); |
30e49c26 PE |
559 | new_ns = get_pid_ns(old_ns); |
560 | if (!(flags & CLONE_NEWPID)) | |
561 | goto out; | |
562 | ||
563 | new_ns = ERR_PTR(-EINVAL); | |
564 | if (flags & CLONE_THREAD) | |
565 | goto out_put; | |
566 | ||
567 | new_ns = create_pid_namespace(old_ns->level + 1); | |
568 | if (!IS_ERR(new_ns)) | |
569 | new_ns->parent = get_pid_ns(old_ns); | |
570 | ||
571 | out_put: | |
572 | put_pid_ns(old_ns); | |
573 | out: | |
574 | return new_ns; | |
9a575a92 CLG |
575 | } |
576 | ||
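From userspace, the CLONE_NEWPID branch above is reached through clone(2). A minimal sketch, assuming root privileges and a kernel with pid namespaces enabled (on older glibc the CLONE_NEWPID constant may need to come from <linux/sched.h> instead of <sched.h>):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static char child_stack[64 * 1024];

static int child(void *arg)
{
	/* Inside a fresh pid namespace this prints 1. */
	printf("child sees itself as pid %d\n", (int)getpid());
	return 0;
}

int main(void)
{
	pid_t pid = clone(child, child_stack + sizeof(child_stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		exit(1);
	}
	printf("parent sees child as pid %d\n", (int)pid);
	waitpid(pid, NULL, 0);
	return 0;
}
```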
577 | void free_pid_ns(struct kref *kref) | |
578 | { | |
30e49c26 | 579 | struct pid_namespace *ns, *parent; |
9a575a92 CLG |
580 | |
581 | ns = container_of(kref, struct pid_namespace, kref); | |
30e49c26 PE |
582 | |
583 | parent = ns->parent; | |
584 | destroy_pid_namespace(ns); | |
585 | ||
586 | if (parent != NULL) | |
587 | put_pid_ns(parent); | |
9a575a92 CLG |
588 | } |
589 | ||
3eb07c8c SB |
590 | void zap_pid_ns_processes(struct pid_namespace *pid_ns) |
591 | { | |
592 | int nr; | |
593 | int rc; | |
594 | ||
595 | /* | |
596 | * The last thread in the namespace's init thread group is terminating. | |
597 | * Find the remaining pids in the namespace, signal them and wait for | |
598 | * them to exit. | |
599 | * | |
600 | * Note: this signals each thread in the namespace - even those that | |
601 | * belong to the same thread group. To avoid this, we would have | |
602 | * to walk the entire tasklist looking for processes in this | |
603 | * namespace, but that could be unnecessarily expensive if the | |
604 | * pid namespace has just a few processes. Or we would need to | |
605 | * maintain a tasklist for each pid namespace. | |
606 | * | |
607 | */ | |
608 | read_lock(&tasklist_lock); | |
609 | nr = next_pidmap(pid_ns, 1); | |
610 | while (nr > 0) { | |
611 | kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr); | |
612 | nr = next_pidmap(pid_ns, nr); | |
613 | } | |
614 | read_unlock(&tasklist_lock); | |
615 | ||
616 | do { | |
617 | clear_thread_flag(TIF_SIGPENDING); | |
618 | rc = sys_wait4(-1, NULL, __WALL, NULL); | |
619 | } while (rc != -ECHILD); | |
620 | ||
621 | ||
622 | /* Child reaper for the pid namespace is going away */ | |
623 | pid_ns->child_reaper = NULL; | |
624 | return; | |
625 | } | |
626 | ||
1da177e4 LT |
627 | /* |
628 | * The pid hash table is scaled according to the amount of memory in the | |
629 | * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or | |
630 | * more. | |
631 | */ | |
632 | void __init pidhash_init(void) | |
633 | { | |
92476d7f | 634 | int i, pidhash_size; |
1da177e4 LT |
635 | unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT); |
636 | ||
637 | pidhash_shift = max(4, fls(megabytes * 4)); | |
638 | pidhash_shift = min(12, pidhash_shift); | |
639 | pidhash_size = 1 << pidhash_shift; | |
640 | ||
641 | printk("PID hash table entries: %d (order: %d, %Zd bytes)\n", | |
642 | pidhash_size, pidhash_shift, | |
92476d7f EB |
643 | pidhash_size * sizeof(struct hlist_head)); |
644 | ||
645 | pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); | |
646 | if (!pid_hash) | |
647 | panic("Could not alloc pidhash!\n"); | |
648 | for (i = 0; i < pidhash_size; i++) | |
649 | INIT_HLIST_HEAD(&pid_hash[i]); | |
1da177e4 LT |
650 | } |
651 | ||
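To make the sizing rule concrete, here is a userspace re-computation of pidhash_shift with fls() replaced by a plain loop; the clamp to the range 4..12 bounds the table between 16 buckets on tiny machines and 4096 buckets at one gigabyte or more:

```c
/* Userspace re-computation of pidhash_shift for a few memory sizes. */
#include <stdio.h>

static int demo_fls(unsigned long x)	/* position of the highest set bit */
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	unsigned long megabytes;

	for (megabytes = 16; megabytes <= 4096; megabytes *= 4) {
		int shift = demo_fls(megabytes * 4);

		if (shift < 4)
			shift = 4;
		if (shift > 12)
			shift = 12;
		printf("%5lu MB -> %d buckets\n", megabytes, 1 << shift);
	}
	return 0;
}
```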
652 | void __init pidmap_init(void) | |
653 | { | |
61a58c6c | 654 | init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); |
73b9ebfe | 655 | /* Reserve PID 0. We never call free_pidmap(0) */ |
61a58c6c SB |
656 | set_bit(0, init_pid_ns.pidmap[0].page); |
657 | atomic_dec(&init_pid_ns.pidmap[0].nr_free); | |
92476d7f | 658 | |
baf8f0f8 PE |
659 | init_pid_ns.pid_cachep = create_pid_cachep(1); |
660 | if (init_pid_ns.pid_cachep == NULL) | |
661 | panic("Can't create pid_1 cachep\n"); | |
c9c5d922 SB |
662 | |
663 | pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); | |
1da177e4 | 664 | } |