/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/sched/task.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)	\
	find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
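/*
 * Rough sizing, for illustration (assumes 4 KiB pages on a 64-bit build):
 * BITS_PER_PAGE = 4096 * 8 = 32768, so the default pid_max of
 * PID_MAX_DEFAULT (32768) fits in a single bitmap page, while the
 * 4-million-entry PID_MAX_LIMIT would use all 128 lazily allocated
 * PIDMAP_ENTRIES pages.
 */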
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
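/*
 * One possible interleaving of the deadlock described above: CPU A runs
 * detach_pid()->free_pid() with tasklist_lock write-held and then waits
 * for pidmap_lock, while CPU B already holds pidmap_lock and takes an
 * interrupt whose handler spins in read_lock(&tasklist_lock). Neither
 * side can make progress, hence pidmap_lock is only taken with
 * interrupts disabled.
 */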

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
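/*
 * Worked example with illustrative values: for base = 32000, a = 32100
 * and b = 400 (a pid that wrapped back to the RESERVED_PIDS region),
 * (unsigned)(32100 - 32000) == 100 while (unsigned)(400 - 32000) is a
 * huge unsigned value, so pid_before() says 32100 is reached before 400
 * when walking upwards from 32000.
 */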

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
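/*
 * Example of the race being resolved (illustrative values): two tasks
 * both observe last_pid == 400 and allocate pids 401 and 402. A loser of
 * the cmpxchg() retries only while its own pid is still "later" than the
 * current last_pid relative to base 400, so last_pid settles on 402 and
 * 401 cannot be handed out again by the very next allocation.
 */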

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
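	/*
	 * For instance (illustrative, 4 KiB pages): with pid_max == 32768
	 * there is a single bitmap page and DIV_ROUND_UP() yields 1. If the
	 * search starts mid-page (offset != 0), max_scan stays 1 and the
	 * loop below visits that page twice; starting at offset 0 drops it
	 * to 0, i.e. a single pass.
	 */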
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				return -ENOMEM;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -EAGAIN;
}

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch(--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
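/*
 * Typical caller, sketched after copy_process() in kernel/fork.c (exact
 * field and label names vary by kernel version):
 *
 *	pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 *	if (IS_ERR(pid)) {
 *		retval = PTR_ERR(pid);
 *		goto bad_fork_cleanup_thread;
 *	}
 *
 * On the fork error path the allocation is undone with free_pid(pid).
 */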

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
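/*
 * Neither lookup takes a reference, so callers either hold
 * rcu_read_lock() across the use or pin the result with get_pid().
 * Illustrative pattern, as used by signal-sending paths in
 * kernel/signal.c:
 *
 *	rcu_read_lock();
 *	error = kill_pid_info(sig, info, find_vpid(pid));
 *	rcu_read_unlock();
 */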

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
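/*
 * Example of the level/namespace mapping (illustrative values): a task
 * forked inside one nested pid namespace has pid->level == 1 and two
 * upids, say numbers[0] = { 4711, &init_pid_ns } and
 * numbers[1] = { 2, &child_ns }. Then pid_nr_ns(pid, &init_pid_ns)
 * returns 4711, pid_nr_ns(pid, &child_ns) returns 2, and any unrelated
 * or deeper namespace gets 0.
 */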

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
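	/*
	 * Illustrative numbers (assuming PIDS_PER_CPU_DEFAULT == 1024 and
	 * PIDS_PER_CPU_MIN == 8 from <linux/threads.h>): a machine with 64
	 * possible CPUs bumps pid_max from the 32768 default to 65536
	 * (still capped by pid_max_max) and pid_max_min from 301 to 512.
	 */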

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
1da177e4 607}