Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Generic pidhash and scalable, time-bounded PID allocator | |
3 | * | |
4 | * (C) 2002-2003 William Irwin, IBM | |
5 | * (C) 2004 William Irwin, Oracle | |
6 | * (C) 2002-2004 Ingo Molnar, Red Hat | |
7 | * | |
8 | * pid-structures are backing objects for tasks sharing a given ID to chain | |
9 | * against. There is very little to them aside from hashing them and | |
10 | * parking tasks using given ID's on a list. | |
11 | * | |
12 | * The hash is always changed with the tasklist_lock write-acquired, | |
13 | * and the hash is only accessed with the tasklist_lock at least | |
14 | * read-acquired, so there's no additional SMP locking needed here. | |
15 | * | |
16 | * We have a list of bitmap pages, which bitmaps represent the PID space. | |
17 | * Allocating and freeing PIDs is completely lockless. The worst-case | |
18 | * allocation scenario when all but one out of 1 million PIDs possible are | |
19 | * allocated already: the scanning of 32 list entries and at most PAGE_SIZE | |
20 | * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). | |
30e49c26 PE |
21 | * |
22 | * Pid namespaces: | |
23 | * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. | |
24 | * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM | |
25 | * Many thanks to Oleg Nesterov for comments and help | |
26 | * | |
1da177e4 LT |
27 | */ |
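To make the scheme described in the comment above concrete, here is a minimal userspace sketch (all `toy_*` names are invented for illustration): one bit per PID, allocation sets the first free bit after the last PID handed out, and freeing clears a single bit. The real allocator differs in important ways -- it uses atomic test_and_set_bit so it can stay lockless, splits the bitmap into lazily allocated pages, and keeps a per-page nr_free counter -- but the underlying data structure is the same.

```c
#include <limits.h>
#include <stdio.h>

#define TOY_PID_MAX 4096
#define WORD_BITS   (sizeof(unsigned long) * CHAR_BIT)

static unsigned long toy_bitmap[TOY_PID_MAX / WORD_BITS];
static int toy_nr_free = TOY_PID_MAX;

static int toy_alloc_pid(int last)
{
	for (int i = 1; i <= TOY_PID_MAX; i++) {
		int pid = (last + i) % TOY_PID_MAX;
		unsigned long bit = 1UL << (pid % WORD_BITS);

		if (!(toy_bitmap[pid / WORD_BITS] & bit)) {
			toy_bitmap[pid / WORD_BITS] |= bit;	/* fastpath: one set-bit */
			toy_nr_free--;
			return pid;
		}
	}
	return -1;				/* every PID is in use */
}

static void toy_free_pid(int pid)
{
	toy_bitmap[pid / WORD_BITS] &= ~(1UL << (pid % WORD_BITS));
	toy_nr_free++;				/* freeing is O(1) */
}

int main(void)
{
	int a = toy_alloc_pid(0), b = toy_alloc_pid(a);

	printf("allocated %d and %d, %d free\n", a, b, toy_nr_free);
	toy_free_pid(a);
	return 0;
}
```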
28 | ||
29 | #include <linux/mm.h> | |
9984de1a | 30 | #include <linux/export.h> |
1da177e4 LT |
31 | #include <linux/slab.h> |
32 | #include <linux/init.h> | |
82524746 | 33 | #include <linux/rculist.h> |
1da177e4 LT |
34 | #include <linux/bootmem.h> |
35 | #include <linux/hash.h> | |
61a58c6c | 36 | #include <linux/pid_namespace.h> |
820e45db | 37 | #include <linux/init_task.h> |
3eb07c8c | 38 | #include <linux/syscalls.h> |
0a01f2cc | 39 | #include <linux/proc_fs.h> |
1da177e4 | 40 | |
8ef047aa PE |
41 | #define pid_hashfn(nr, ns) \ |
42 | hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) | |
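The hash key mixes both the numeric id and the namespace pointer, so the same nr in two different pid namespaces usually lands in different buckets. A rough userspace stand-in for hash_long() is sketched below; the multiplier is just an illustrative golden-ratio-style constant (not necessarily the kernel's) and the sketch assumes a 64-bit unsigned long.

```c
#include <stdint.h>
#include <stdio.h>

/* simplified multiplicative hash in the spirit of hash_long() */
static unsigned int toy_pid_hashfn(unsigned long nr, const void *ns,
				   unsigned int shift)
{
	unsigned long key = nr + (unsigned long)ns;

	return (unsigned int)((key * 0x9e3779b97f4a7c15UL) >> (64 - shift));
}

int main(void)
{
	int ns_a, ns_b;		/* two distinct "namespace" objects */

	/* same pid number, different namespaces: usually different buckets */
	printf("%u %u\n", toy_pid_hashfn(1, &ns_a, 4),
			  toy_pid_hashfn(1, &ns_b, 4));
	return 0;
}
```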
92476d7f | 43 | static struct hlist_head *pid_hash; |
2c85f51d | 44 | static unsigned int pidhash_shift = 4; |
820e45db | 45 | struct pid init_struct_pid = INIT_STRUCT_PID; |
1da177e4 LT |
46 | |
47 | int pid_max = PID_MAX_DEFAULT; | |
1da177e4 LT |
48 | |
49 | #define RESERVED_PIDS 300 | |
50 | ||
51 | int pid_max_min = RESERVED_PIDS + 1; | |
52 | int pid_max_max = PID_MAX_LIMIT; | |
53 | ||
1da177e4 LT |
54 | #define BITS_PER_PAGE (PAGE_SIZE*8) |
55 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) | |
3fbc9648 | 56 | |
61a58c6c SB |
57 | static inline int mk_pid(struct pid_namespace *pid_ns, |
58 | struct pidmap *map, int off) | |
3fbc9648 | 59 | { |
61a58c6c | 60 | return (map - pid_ns->pidmap)*BITS_PER_PAGE + off; |
3fbc9648 SB |
61 | } |
62 | ||
1da177e4 LT |
63 | #define find_next_offset(map, off) \ |
64 | find_next_zero_bit((map)->page, BITS_PER_PAGE, off) | |
65 | ||
66 | /* | |
67 | * PID-map pages start out as NULL, they get allocated upon | |
68 | * first use and are never deallocated. This way a low pid_max | |
69 | * value does not cause lots of bitmaps to be allocated, but | |
70 | * the scheme scales to up to 4 million PIDs, runtime. | |
71 | */ | |
61a58c6c | 72 | struct pid_namespace init_pid_ns = { |
9a575a92 CLG |
73 | .kref = { |
74 | .refcount = ATOMIC_INIT(2), | |
75 | }, | |
3fbc9648 SB |
76 | .pidmap = { |
77 | [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } | |
78 | }, | |
84d73786 | 79 | .last_pid = 0, |
faacbfd3 PE |
80 | .level = 0, |
81 | .child_reaper = &init_task, | |
49f4d8b9 | 82 | .user_ns = &init_user_ns, |
3fbc9648 | 83 | }; |
198fe21b | 84 | EXPORT_SYMBOL_GPL(init_pid_ns); |
1da177e4 | 85 | |
b461cc03 | 86 | int is_container_init(struct task_struct *tsk) |
b460cbc5 | 87 | { |
b461cc03 PE |
88 | int ret = 0; |
89 | struct pid *pid; | |
90 | ||
91 | rcu_read_lock(); | |
92 | pid = task_pid(tsk); | |
93 | if (pid != NULL && pid->numbers[pid->level].nr == 1) | |
94 | ret = 1; | |
95 | rcu_read_unlock(); | |
96 | ||
97 | return ret; | |
b460cbc5 | 98 | } |
b461cc03 | 99 | EXPORT_SYMBOL(is_container_init); |
b460cbc5 | 100 | |
92476d7f EB |
101 | /* |
102 | * Note: disable interrupts while the pidmap_lock is held as an | |
103 | * interrupt might come in and do read_lock(&tasklist_lock). | |
104 | * | |
105 | * If we don't disable interrupts there is a nasty deadlock between | |
106 | * detach_pid()->free_pid() and another cpu that does | |
107 | * spin_lock(&pidmap_lock) followed by an interrupt routine that does | |
108 | * read_lock(&tasklist_lock); | |
109 | * | |
110 | * After we clean up the tasklist_lock and know there are no | |
111 | * irq handlers that take it we can leave the interrupts enabled. | |
112 | * For now it is easier to be safe than to prove it can't happen. | |
113 | */ | |
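A step-by-step reading of the deadlock that comment warns about, assuming pidmap_lock were taken with plain spin_lock() and interrupts left enabled (this is an illustration of the described scenario, not code from the file):

```
CPU 0                                      CPU 1
-----                                      -----
write_lock_irq(&tasklist_lock)
detach_pid() -> free_pid():
  spin_lock(&pidmap_lock)                  spin_lock(&pidmap_lock)   <- acquired,
    <- spins, CPU 1 holds it                  interrupts still enabled
                                           <interrupt arrives>
                                             read_lock(&tasklist_lock)
                                               <- spins, CPU 0 holds it for write
```

Neither CPU can make progress: CPU 0 waits for pidmap_lock, which CPU 1 cannot release until its interrupt handler gets tasklist_lock, which CPU 0 holds. Disabling interrupts while pidmap_lock is held breaks the cycle.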
3fbc9648 | 114 | |
1da177e4 LT |
115 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); |
116 | ||
b7127aa4 | 117 | static void free_pidmap(struct upid *upid) |
1da177e4 | 118 | { |
b7127aa4 ON |
119 | int nr = upid->nr; |
120 | struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE; | |
121 | int offset = nr & BITS_PER_PAGE_MASK; | |
1da177e4 LT |
122 | |
123 | clear_bit(offset, map->page); | |
124 | atomic_inc(&map->nr_free); | |
125 | } | |
126 | ||
5fdee8c4 S |
127 | /* |
128 | * If we started walking pids at 'base', is 'a' seen before 'b'? | |
129 | */ | |
130 | static int pid_before(int base, int a, int b) | |
131 | { | |
132 | /* | |
133 | * This is the same as saying | |
134 | * | |
135 | * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT | |
136 | * and that mapping orders 'a' and 'b' with respect to 'base'. | |
137 | */ | |
138 | return (unsigned)(a - base) < (unsigned)(b - base); | |
139 | } | |
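A standalone check of what that unsigned-subtraction trick buys; the numbers assume the default pid_max of 32768, and main() with its asserted values is illustrative only:

```c
#include <assert.h>

static int pid_before(int base, int a, int b)
{
	return (unsigned)(a - base) < (unsigned)(b - base);
}

int main(void)
{
	/* counting up from 32000, pid 32050 is "seen before" pid 300 even
	 * though 300 is numerically smaller, because 300 is only reached
	 * after the counter wraps around pid_max */
	assert(pid_before(32000, 32050, 300));
	assert(!pid_before(32000, 300, 32050));
	return 0;
}
```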
140 | ||
141 | /* | |
b8f566b0 PE |
142 | * We might be racing with someone else trying to set pid_ns->last_pid |
143 | * at the pid allocation time (there's also a sysctl for this, but racing | |
144 | * with this one is OK, see comment in kernel/pid_namespace.c about it). | |
5fdee8c4 S |
145 | * We want the winner to have the "later" value, because if the |
146 | * "earlier" value prevails, then a pid may get reused immediately. | |
147 | * | |
148 | * Since pids rollover, it is not sufficient to just pick the bigger | |
149 | * value. We have to consider where we started counting from. | |
150 | * | |
151 | * 'base' is the value of pid_ns->last_pid that we observed when | |
152 | * we started looking for a pid. | |
153 | * | |
154 | * 'pid' is the pid that we eventually found. | |
155 | */ | |
156 | static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid) | |
157 | { | |
158 | int prev; | |
159 | int last_write = base; | |
160 | do { | |
161 | prev = last_write; | |
162 | last_write = cmpxchg(&pid_ns->last_pid, prev, pid); | |
163 | } while ((prev != last_write) && (pid_before(base, last_write, pid))); | |
164 | } | |
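A userspace analogue of the same retry loop, using C11 atomics in place of cmpxchg(); the names mirror the function above, but this is a sketch, not the kernel code:

```c
#include <stdatomic.h>

static _Atomic int last_pid;

static int pid_before(int base, int a, int b)
{
	return (unsigned)(a - base) < (unsigned)(b - base);
}

/* publish 'pid' unless another thread already published a later value */
static void toy_set_last_pid(int base, int pid)
{
	int prev = base;

	while (!atomic_compare_exchange_weak(&last_pid, &prev, pid)) {
		/* on failure 'prev' now holds the value that beat us */
		if (!pid_before(base, prev, pid))
			break;		/* their value is later: keep it */
	}
}
```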
165 | ||
61a58c6c | 166 | static int alloc_pidmap(struct pid_namespace *pid_ns) |
1da177e4 | 167 | { |
61a58c6c | 168 | int i, offset, max_scan, pid, last = pid_ns->last_pid; |
6a1f3b84 | 169 | struct pidmap *map; |
1da177e4 LT |
170 | |
171 | pid = last + 1; | |
172 | if (pid >= pid_max) | |
173 | pid = RESERVED_PIDS; | |
174 | offset = pid & BITS_PER_PAGE_MASK; | |
61a58c6c | 175 | map = &pid_ns->pidmap[pid/BITS_PER_PAGE]; |
c52b0b91 ON |
176 | /* |
177 | * If last_pid points into the middle of the map->page we | |
178 | * want to scan this bitmap block twice, the second time | |
179 | * we start with offset == 0 (or RESERVED_PIDS). | |
180 | */ | |
181 | max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset; | |
1da177e4 LT |
182 | for (i = 0; i <= max_scan; ++i) { |
183 | if (unlikely(!map->page)) { | |
3fbc9648 | 184 | void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); |
1da177e4 LT |
185 | /* |
186 | * Free the page if someone raced with us | |
187 | * installing it: | |
188 | */ | |
92476d7f | 189 | spin_lock_irq(&pidmap_lock); |
7be6d991 | 190 | if (!map->page) { |
3fbc9648 | 191 | map->page = page; |
7be6d991 AGR |
192 | page = NULL; |
193 | } | |
92476d7f | 194 | spin_unlock_irq(&pidmap_lock); |
7be6d991 | 195 | kfree(page); |
1da177e4 LT |
196 | if (unlikely(!map->page)) |
197 | break; | |
198 | } | |
199 | if (likely(atomic_read(&map->nr_free))) { | |
200 | do { | |
201 | if (!test_and_set_bit(offset, map->page)) { | |
202 | atomic_dec(&map->nr_free); | |
5fdee8c4 | 203 | set_last_pid(pid_ns, last, pid); |
1da177e4 LT |
204 | return pid; |
205 | } | |
206 | offset = find_next_offset(map, offset); | |
61a58c6c | 207 | pid = mk_pid(pid_ns, map, offset); |
c52b0b91 | 208 | } while (offset < BITS_PER_PAGE && pid < pid_max); |
1da177e4 | 209 | } |
61a58c6c | 210 | if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { |
1da177e4 LT |
211 | ++map; |
212 | offset = 0; | |
213 | } else { | |
61a58c6c | 214 | map = &pid_ns->pidmap[0]; |
1da177e4 LT |
215 | offset = RESERVED_PIDS; |
216 | if (unlikely(last == offset)) | |
217 | break; | |
218 | } | |
61a58c6c | 219 | pid = mk_pid(pid_ns, map, offset); |
1da177e4 LT |
220 | } |
221 | return -1; | |
222 | } | |
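The map->page handling above follows a common pattern: allocate the page outside the lock (kzalloc may sleep), install it under the lock only if nobody else won the race, and free the loser's copy. A userspace sketch of that pattern (install_page and install_lock are invented names; free(NULL) is a no-op, so the loser's cleanup is unconditional):

```c
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t install_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns the page now published in *slot, allocating it on first use */
static void *install_page(void **slot, size_t size)
{
	void *page = calloc(1, size);	/* done outside the lock: may be slow */

	pthread_mutex_lock(&install_lock);
	if (*slot == NULL) {		/* we won the race: publish ours */
		*slot = page;
		page = NULL;
	}
	pthread_mutex_unlock(&install_lock);

	free(page);			/* lost the race: discard our copy */
	return *slot;
}
```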
223 | ||
c78193e9 | 224 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) |
0804ef4b EB |
225 | { |
226 | int offset; | |
f40f50d3 | 227 | struct pidmap *map, *end; |
0804ef4b | 228 | |
c78193e9 LT |
229 | if (last >= PID_MAX_LIMIT) |
230 | return -1; | |
231 | ||
0804ef4b | 232 | offset = (last + 1) & BITS_PER_PAGE_MASK; |
61a58c6c SB |
233 | map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; |
234 | end = &pid_ns->pidmap[PIDMAP_ENTRIES]; | |
f40f50d3 | 235 | for (; map < end; map++, offset = 0) { |
0804ef4b EB |
236 | if (unlikely(!map->page)) |
237 | continue; | |
238 | offset = find_next_bit((map)->page, BITS_PER_PAGE, offset); | |
239 | if (offset < BITS_PER_PAGE) | |
61a58c6c | 240 | return mk_pid(pid_ns, map, offset); |
0804ef4b EB |
241 | } |
242 | return -1; | |
243 | } | |
244 | ||
7ad5b3a5 | 245 | void put_pid(struct pid *pid) |
92476d7f | 246 | { |
baf8f0f8 PE |
247 | struct pid_namespace *ns; |
248 | ||
92476d7f EB |
249 | if (!pid) |
250 | return; | |
baf8f0f8 | 251 | |
8ef047aa | 252 | ns = pid->numbers[pid->level].ns; |
92476d7f | 253 | if ((atomic_read(&pid->count) == 1) || |
8ef047aa | 254 | atomic_dec_and_test(&pid->count)) { |
baf8f0f8 | 255 | kmem_cache_free(ns->pid_cachep, pid); |
b461cc03 | 256 | put_pid_ns(ns); |
8ef047aa | 257 | } |
92476d7f | 258 | } |
bbf73147 | 259 | EXPORT_SYMBOL_GPL(put_pid); |
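put_pid() avoids an atomic read-modify-write when the caller visibly holds the only reference. A userspace sketch of that fast path follows (toy names; it relies on the same rule the kernel does, namely that once the count reaches 1 no other thread can create new references):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct toy_obj {
	_Atomic int count;
	/* payload ... */
};

static void toy_put(struct toy_obj *obj)
{
	/* sole owner: skip the atomic decrement entirely */
	if (atomic_load(&obj->count) == 1 ||
	    atomic_fetch_sub(&obj->count, 1) == 1)	/* dropped the last ref */
		free(obj);
}
```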
92476d7f EB |
260 | |
261 | static void delayed_put_pid(struct rcu_head *rhp) | |
262 | { | |
263 | struct pid *pid = container_of(rhp, struct pid, rcu); | |
264 | put_pid(pid); | |
265 | } | |
266 | ||
7ad5b3a5 | 267 | void free_pid(struct pid *pid) |
92476d7f EB |
268 | { |
269 | /* We can be called with write_lock_irq(&tasklist_lock) held */ | |
8ef047aa | 270 | int i; |
92476d7f EB |
271 | unsigned long flags; |
272 | ||
273 | spin_lock_irqsave(&pidmap_lock, flags); | |
0a01f2cc EB |
274 | for (i = 0; i <= pid->level; i++) { |
275 | struct upid *upid = pid->numbers + i; | |
276 | hlist_del_rcu(&upid->pid_chain); | |
277 | if (--upid->ns->nr_hashed == 0) | |
278 | schedule_work(&upid->ns->proc_work); | |
279 | } | |
92476d7f EB |
280 | spin_unlock_irqrestore(&pidmap_lock, flags); |
281 | ||
8ef047aa | 282 | for (i = 0; i <= pid->level; i++) |
b7127aa4 | 283 | free_pidmap(pid->numbers + i); |
8ef047aa | 284 | |
92476d7f EB |
285 | call_rcu(&pid->rcu, delayed_put_pid); |
286 | } | |
287 | ||
8ef047aa | 288 | struct pid *alloc_pid(struct pid_namespace *ns) |
92476d7f EB |
289 | { |
290 | struct pid *pid; | |
291 | enum pid_type type; | |
8ef047aa PE |
292 | int i, nr; |
293 | struct pid_namespace *tmp; | |
198fe21b | 294 | struct upid *upid; |
92476d7f | 295 | |
baf8f0f8 | 296 | pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); |
92476d7f EB |
297 | if (!pid) |
298 | goto out; | |
299 | ||
8ef047aa | 300 | tmp = ns; |
0a01f2cc | 301 | pid->level = ns->level; |
8ef047aa PE |
302 | for (i = ns->level; i >= 0; i--) { |
303 | nr = alloc_pidmap(tmp); | |
304 | if (nr < 0) | |
305 | goto out_free; | |
92476d7f | 306 | |
8ef047aa PE |
307 | pid->numbers[i].nr = nr; |
308 | pid->numbers[i].ns = tmp; | |
309 | tmp = tmp->parent; | |
310 | } | |
311 | ||
0a01f2cc EB |
312 | if (unlikely(is_child_reaper(pid))) { |
313 | if (pid_ns_prepare_proc(ns)) | |
314 | goto out_free; | |
315 | } | |
316 | ||
b461cc03 | 317 | get_pid_ns(ns); |
92476d7f | 318 | atomic_set(&pid->count, 1); |
92476d7f EB |
319 | for (type = 0; type < PIDTYPE_MAX; ++type) |
320 | INIT_HLIST_HEAD(&pid->tasks[type]); | |
321 | ||
417e3152 | 322 | upid = pid->numbers + ns->level; |
92476d7f | 323 | spin_lock_irq(&pidmap_lock); |
0a01f2cc | 324 | for ( ; upid >= pid->numbers; --upid) { |
198fe21b PE |
325 | hlist_add_head_rcu(&upid->pid_chain, |
326 | &pid_hash[pid_hashfn(upid->nr, upid->ns)]); | |
0a01f2cc EB |
327 | upid->ns->nr_hashed++; |
328 | } | |
92476d7f EB |
329 | spin_unlock_irq(&pidmap_lock); |
330 | ||
331 | out: | |
332 | return pid; | |
333 | ||
334 | out_free: | |
b7127aa4 ON |
335 | while (++i <= ns->level) |
336 | free_pidmap(pid->numbers + i); | |
8ef047aa | 337 | |
baf8f0f8 | 338 | kmem_cache_free(ns->pid_cachep, pid); |
92476d7f EB |
339 | pid = NULL; |
340 | goto out; | |
341 | } | |
342 | ||
7ad5b3a5 | 343 | struct pid *find_pid_ns(int nr, struct pid_namespace *ns) |
1da177e4 LT |
344 | { |
345 | struct hlist_node *elem; | |
198fe21b PE |
346 | struct upid *pnr; |
347 | ||
348 | hlist_for_each_entry_rcu(pnr, elem, | |
349 | &pid_hash[pid_hashfn(nr, ns)], pid_chain) | |
350 | if (pnr->nr == nr && pnr->ns == ns) | |
351 | return container_of(pnr, struct pid, | |
352 | numbers[ns->level]); | |
1da177e4 | 353 | |
1da177e4 LT |
354 | return NULL; |
355 | } | |
198fe21b | 356 | EXPORT_SYMBOL_GPL(find_pid_ns); |
1da177e4 | 357 | |
8990571e PE |
358 | struct pid *find_vpid(int nr) |
359 | { | |
17cf22c3 | 360 | return find_pid_ns(nr, task_active_pid_ns(current)); |
8990571e PE |
361 | } |
362 | EXPORT_SYMBOL_GPL(find_vpid); | |
363 | ||
e713d0da SB |
364 | /* |
365 | * attach_pid() must be called with the tasklist_lock write-held. | |
366 | */ | |
24336eae | 367 | void attach_pid(struct task_struct *task, enum pid_type type, |
e713d0da | 368 | struct pid *pid) |
1da177e4 | 369 | { |
92476d7f | 370 | struct pid_link *link; |
92476d7f | 371 | |
92476d7f | 372 | link = &task->pids[type]; |
e713d0da | 373 | link->pid = pid; |
92476d7f | 374 | hlist_add_head_rcu(&link->node, &pid->tasks[type]); |
1da177e4 LT |
375 | } |
376 | ||
24336eae ON |
377 | static void __change_pid(struct task_struct *task, enum pid_type type, |
378 | struct pid *new) | |
1da177e4 | 379 | { |
92476d7f EB |
380 | struct pid_link *link; |
381 | struct pid *pid; | |
382 | int tmp; | |
1da177e4 | 383 | |
92476d7f EB |
384 | link = &task->pids[type]; |
385 | pid = link->pid; | |
1da177e4 | 386 | |
92476d7f | 387 | hlist_del_rcu(&link->node); |
24336eae | 388 | link->pid = new; |
1da177e4 | 389 | |
92476d7f EB |
390 | for (tmp = PIDTYPE_MAX; --tmp >= 0; ) |
391 | if (!hlist_empty(&pid->tasks[tmp])) | |
392 | return; | |
1da177e4 | 393 | |
92476d7f | 394 | free_pid(pid); |
1da177e4 LT |
395 | } |
396 | ||
24336eae ON |
397 | void detach_pid(struct task_struct *task, enum pid_type type) |
398 | { | |
399 | __change_pid(task, type, NULL); | |
400 | } | |
401 | ||
402 | void change_pid(struct task_struct *task, enum pid_type type, | |
403 | struct pid *pid) | |
404 | { | |
405 | __change_pid(task, type, pid); | |
406 | attach_pid(task, type, pid); | |
407 | } | |
408 | ||
c18258c6 | 409 | /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ |
7ad5b3a5 | 410 | void transfer_pid(struct task_struct *old, struct task_struct *new, |
c18258c6 EB |
411 | enum pid_type type) |
412 | { | |
413 | new->pids[type].pid = old->pids[type].pid; | |
414 | hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); | |
c18258c6 EB |
415 | } |
416 | ||
7ad5b3a5 | 417 | struct task_struct *pid_task(struct pid *pid, enum pid_type type) |
1da177e4 | 418 | { |
92476d7f EB |
419 | struct task_struct *result = NULL; |
420 | if (pid) { | |
421 | struct hlist_node *first; | |
67bdbffd | 422 | first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), |
db1466b3 | 423 | lockdep_tasklist_lock_is_held()); |
92476d7f EB |
424 | if (first) |
425 | result = hlist_entry(first, struct task_struct, pids[(type)].node); | |
426 | } | |
427 | return result; | |
428 | } | |
eccba068 | 429 | EXPORT_SYMBOL(pid_task); |
1da177e4 | 430 | |
92476d7f | 431 | /* |
9728e5d6 | 432 | * Must be called under rcu_read_lock(). |
92476d7f | 433 | */ |
17f98dcf | 434 | struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) |
92476d7f | 435 | { |
b3fbab05 PM |
436 | rcu_lockdep_assert(rcu_read_lock_held(), |
437 | "find_task_by_pid_ns() needs rcu_read_lock()" | |
438 | " protection"); | |
17f98dcf | 439 | return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); |
92476d7f | 440 | } |
1da177e4 | 441 | |
228ebcbe PE |
442 | struct task_struct *find_task_by_vpid(pid_t vnr) |
443 | { | |
17cf22c3 | 444 | return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); |
228ebcbe | 445 | } |
228ebcbe | 446 | |
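Callers must hold rcu_read_lock() across the lookup and must take their own reference before leaving the RCU section if they want to keep using the task. A schematic kernel-style fragment of that pattern (not a complete translation unit; it is essentially what get_pid_task() below does for a struct pid):

```c
struct task_struct *task;

rcu_read_lock();
task = find_task_by_vpid(nr);
if (task)
	get_task_struct(task);	/* pin it before leaving the RCU section */
rcu_read_unlock();

/* ... use task ... */
if (task)
	put_task_struct(task);
```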
1a657f78 ON |
447 | struct pid *get_task_pid(struct task_struct *task, enum pid_type type) |
448 | { | |
449 | struct pid *pid; | |
450 | rcu_read_lock(); | |
2ae448ef ON |
451 | if (type != PIDTYPE_PID) |
452 | task = task->group_leader; | |
1a657f78 ON |
453 | pid = get_pid(task->pids[type].pid); |
454 | rcu_read_unlock(); | |
455 | return pid; | |
456 | } | |
77c100c8 | 457 | EXPORT_SYMBOL_GPL(get_task_pid); |
1a657f78 | 458 | |
7ad5b3a5 | 459 | struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) |
92476d7f EB |
460 | { |
461 | struct task_struct *result; | |
462 | rcu_read_lock(); | |
463 | result = pid_task(pid, type); | |
464 | if (result) | |
465 | get_task_struct(result); | |
466 | rcu_read_unlock(); | |
467 | return result; | |
1da177e4 | 468 | } |
77c100c8 | 469 | EXPORT_SYMBOL_GPL(get_pid_task); |
1da177e4 | 470 | |
92476d7f | 471 | struct pid *find_get_pid(pid_t nr) |
1da177e4 LT |
472 | { |
473 | struct pid *pid; | |
474 | ||
92476d7f | 475 | rcu_read_lock(); |
198fe21b | 476 | pid = get_pid(find_vpid(nr)); |
92476d7f | 477 | rcu_read_unlock(); |
1da177e4 | 478 | |
92476d7f | 479 | return pid; |
1da177e4 | 480 | } |
339caf2a | 481 | EXPORT_SYMBOL_GPL(find_get_pid); |
1da177e4 | 482 | |
7af57294 PE |
483 | pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) |
484 | { | |
485 | struct upid *upid; | |
486 | pid_t nr = 0; | |
487 | ||
488 | if (pid && ns->level <= pid->level) { | |
489 | upid = &pid->numbers[ns->level]; | |
490 | if (upid->ns == ns) | |
491 | nr = upid->nr; | |
492 | } | |
493 | return nr; | |
494 | } | |
4f82f457 | 495 | EXPORT_SYMBOL_GPL(pid_nr_ns); |
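One struct pid carries one (nr, ns) pair per namespace level, and pid_nr_ns() simply indexes that array, returning 0 when the pid is not visible in the asked-for namespace. A small userspace model of that layout (all `toy_*` names and the sample numbers are invented):

```c
#include <stdio.h>

struct toy_ns   { int level; };
struct toy_upid { int nr; struct toy_ns *ns; };
struct toy_pid  { int level; struct toy_upid numbers[2]; };

static int toy_pid_nr_ns(struct toy_pid *pid, struct toy_ns *ns)
{
	if (pid && ns->level <= pid->level &&
	    pid->numbers[ns->level].ns == ns)
		return pid->numbers[ns->level].nr;
	return 0;	/* not visible in that namespace */
}

int main(void)
{
	struct toy_ns init_ns = { .level = 0 }, child_ns = { .level = 1 };
	struct toy_pid pid = {
		.level = 1,
		.numbers = { { 12345, &init_ns }, { 7, &child_ns } },
	};

	/* the same task is pid 12345 globally but pid 7 inside its namespace */
	printf("%d %d\n", toy_pid_nr_ns(&pid, &init_ns),
			  toy_pid_nr_ns(&pid, &child_ns));
	return 0;
}
```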
7af57294 | 496 | |
44c4e1b2 EB |
497 | pid_t pid_vnr(struct pid *pid) |
498 | { | |
17cf22c3 | 499 | return pid_nr_ns(pid, task_active_pid_ns(current)); |
44c4e1b2 EB |
500 | } |
501 | EXPORT_SYMBOL_GPL(pid_vnr); | |
502 | ||
52ee2dfd ON |
503 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, |
504 | struct pid_namespace *ns) | |
2f2a3a46 | 505 | { |
52ee2dfd ON |
506 | pid_t nr = 0; |
507 | ||
508 | rcu_read_lock(); | |
509 | if (!ns) | |
17cf22c3 | 510 | ns = task_active_pid_ns(current); |
52ee2dfd ON |
511 | if (likely(pid_alive(task))) { |
512 | if (type != PIDTYPE_PID) | |
513 | task = task->group_leader; | |
514 | nr = pid_nr_ns(task->pids[type].pid, ns); | |
515 | } | |
516 | rcu_read_unlock(); | |
517 | ||
518 | return nr; | |
2f2a3a46 | 519 | } |
52ee2dfd | 520 | EXPORT_SYMBOL(__task_pid_nr_ns); |
2f2a3a46 PE |
521 | |
522 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) | |
523 | { | |
524 | return pid_nr_ns(task_tgid(tsk), ns); | |
525 | } | |
526 | EXPORT_SYMBOL(task_tgid_nr_ns); | |
527 | ||
61bce0f1 EB |
528 | struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) |
529 | { | |
530 | return ns_of_pid(task_pid(tsk)); | |
531 | } | |
532 | EXPORT_SYMBOL_GPL(task_active_pid_ns); | |
533 | ||
0804ef4b | 534 | /* |
025dfdaf | 535 | * Used by proc to find the first pid that is greater than or equal to nr. |
0804ef4b | 536 | * |
e49859e7 | 537 | * If there is a pid at nr this function is exactly the same as find_pid_ns. |
0804ef4b | 538 | */ |
198fe21b | 539 | struct pid *find_ge_pid(int nr, struct pid_namespace *ns) |
0804ef4b EB |
540 | { |
541 | struct pid *pid; | |
542 | ||
543 | do { | |
198fe21b | 544 | pid = find_pid_ns(nr, ns); |
0804ef4b EB |
545 | if (pid) |
546 | break; | |
198fe21b | 547 | nr = next_pidmap(ns, nr); |
0804ef4b EB |
548 | } while (nr > 0); |
549 | ||
550 | return pid; | |
551 | } | |
552 | ||
1da177e4 LT |
553 | /* |
554 | * The pid hash table is scaled according to the amount of memory in the | |
555 | * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or | |
556 | * more. | |
557 | */ | |
558 | void __init pidhash_init(void) | |
559 | { | |
074b8517 | 560 | unsigned int i, pidhash_size; |
1da177e4 | 561 | |
2c85f51d JB |
562 | pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, |
563 | HASH_EARLY | HASH_SMALL, | |
31fe62b9 TB |
564 | &pidhash_shift, NULL, |
565 | 0, 4096); | |
074b8517 | 566 | pidhash_size = 1U << pidhash_shift; |
1da177e4 | 567 | |
92476d7f EB |
568 | for (i = 0; i < pidhash_size; i++) |
569 | INIT_HLIST_HEAD(&pid_hash[i]); | |
1da177e4 LT |
570 | } |
571 | ||
572 | void __init pidmap_init(void) | |
573 | { | |
72680a19 HB |
574 | /* bump default and minimum pid_max based on number of cpus */ |
575 | pid_max = min(pid_max_max, max_t(int, pid_max, | |
576 | PIDS_PER_CPU_DEFAULT * num_possible_cpus())); | |
577 | pid_max_min = max_t(int, pid_max_min, | |
578 | PIDS_PER_CPU_MIN * num_possible_cpus()); | |
579 | pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min); | |
580 | ||
61a58c6c | 581 | init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); |
73b9ebfe | 582 | /* Reserve PID 0. We never call free_pidmap(0) */ |
61a58c6c SB |
583 | set_bit(0, init_pid_ns.pidmap[0].page); |
584 | atomic_dec(&init_pid_ns.pidmap[0].nr_free); | |
0a01f2cc | 585 | init_pid_ns.nr_hashed = 1; |
92476d7f | 586 | |
74bd59bb PE |
587 | init_pid_ns.pid_cachep = KMEM_CACHE(pid, |
588 | SLAB_HWCACHE_ALIGN | SLAB_PANIC); | |
1da177e4 | 589 | } |
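A worked example of the scaling done in pidmap_init(), assuming the usual values PIDS_PER_CPU_DEFAULT = 1024, PIDS_PER_CPU_MIN = 8, PID_MAX_DEFAULT = 32768 and PID_MAX_LIMIT = 4194304 (check include/linux/threads.h for the values in your tree). On a 64-CPU box this bumps pid_max to 65536 and pid_max_min to 512:

```c
#include <stdio.h>

int main(void)
{
	/* assumed constants, see the lead-in above */
	const int pids_per_cpu_default = 1024, pids_per_cpu_min = 8;
	int pid_max = 32768, pid_max_min = 300 + 1, pid_max_max = 4194304;
	int ncpus = 64;

	int scaled = pids_per_cpu_default * ncpus;		/* 65536 */
	pid_max = scaled > pid_max ? scaled : pid_max;		/* max_t() */
	pid_max = pid_max < pid_max_max ? pid_max : pid_max_max; /* min()  */

	scaled = pids_per_cpu_min * ncpus;			/* 512 */
	pid_max_min = scaled > pid_max_min ? scaled : pid_max_min;

	printf("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
	return 0;
}
```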