[PATCH] pid: implement signal functions that take a struct pid *
[linux-2.6-block.git] / kernel / pid.c
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, costs a scan of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>

#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static kmem_cache_t *pid_cachep;

int pid_max = PID_MAX_DEFAULT;
int last_pid;

#define RESERVED_PIDS 300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
#define mk_pid(map, off) (((map) - pidmap_array)*BITS_PER_PAGE + (off))
#define find_next_offset(map, off) \
        find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

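/*
 * Worked example of the macros above (illustrative only, assuming 4 KB
 * pages): BITS_PER_PAGE is 4096*8 = 32768, so pid 40000 lives in
 * pidmap_array[40000/32768] = pidmap_array[1] at bit offset
 * 40000 & 32767 = 7232, and mk_pid() inverts that mapping:
 * 1*32768 + 7232 = 40000.
 */
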
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme still scales up to 4 million PIDs at runtime.
 */
typedef struct pidmap {
        atomic_t nr_free;
        void *page;
} pidmap_t;

static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
         { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };

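/*
 * Sizing sketch (illustrative, assuming 4 KB pages and a 4-million-PID
 * PID_MAX_LIMIT): PIDMAP_ENTRIES works out to 4194304/32768 = 128 slots,
 * so even a fully populated map costs only 128 bitmap pages (512 KB),
 * while the default pid_max of 32768 fits in a single page.
 */
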
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

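/*
 * Illustrative interleaving of the deadlock described above, were the
 * lock taken without disabling interrupts:
 *
 *      CPU 0                                   CPU 1
 *      write_lock_irq(&tasklist_lock);         spin_lock(&pidmap_lock);
 *      detach_pid()->free_pid()                <interrupt>
 *        spins on pidmap_lock                  read_lock(&tasklist_lock);
 *                                                spins on tasklist_lock
 */
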
/* Return a pid number to its bitmap page; lockless, pairs with alloc_pidmap(). */
static fastcall void free_pidmap(int pid)
{
        pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
        int offset = pid & BITS_PER_PAGE_MASK;

        clear_bit(offset, map->page);
        atomic_inc(&map->nr_free);
}

/*
 * Find and reserve the next free pid number after last_pid, wrapping
 * around to RESERVED_PIDS once pid_max is reached. Returns -1 when the
 * pid space is exhausted.
 */
static int alloc_pidmap(void)
{
        int i, offset, max_scan, pid, last = last_pid;
        pidmap_t *map;

        pid = last + 1;
        if (pid >= pid_max)
                pid = RESERVED_PIDS;
        offset = pid & BITS_PER_PAGE_MASK;
        map = &pidmap_array[pid/BITS_PER_PAGE];
        max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
        for (i = 0; i <= max_scan; ++i) {
                if (unlikely(!map->page)) {
                        unsigned long page = get_zeroed_page(GFP_KERNEL);
                        /*
                         * Free the page if someone raced with us
                         * installing it:
                         */
                        spin_lock_irq(&pidmap_lock);
                        if (map->page)
                                free_page(page);
                        else
                                map->page = (void *)page;
                        spin_unlock_irq(&pidmap_lock);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->nr_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->nr_free);
                                        last_pid = pid;
                                        return pid;
                                }
                                offset = find_next_offset(map, offset);
                                pid = mk_pid(map, offset);
                        /*
                         * find_next_offset() found a bit, the pid from it
                         * is in-bounds, and if we fell back to the last
                         * bitmap block and the final block was the same
                         * as the starting point, pid is before last_pid.
                         */
                        } while (offset < BITS_PER_PAGE && pid < pid_max &&
                                        (i != max_scan || pid < last ||
                                            !((last+1) & BITS_PER_PAGE_MASK)));
                }
                if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &pidmap_array[0];
                        offset = RESERVED_PIDS;
                        if (unlikely(last == offset))
                                break;
                }
                pid = mk_pid(map, offset);
        }
        return -1;
}

/* Find the first allocated pid number greater than @last, or -1 if none. */
static int next_pidmap(int last)
{
        int offset;
        pidmap_t *map;

        offset = (last + 1) & BITS_PER_PAGE_MASK;
        map = &pidmap_array[(last + 1)/BITS_PER_PAGE];
        for (; map < &pidmap_array[PIDMAP_ENTRIES]; map++, offset = 0) {
                if (unlikely(!map->page))
                        continue;
                offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
                if (offset < BITS_PER_PAGE)
                        return mk_pid(map, offset);
        }
        return -1;
}

fastcall void put_pid(struct pid *pid)
{
        if (!pid)
                return;
        /* If we hold the only reference, skip the atomic decrement. */
        if ((atomic_read(&pid->count) == 1) ||
             atomic_dec_and_test(&pid->count))
                kmem_cache_free(pid_cachep, pid);
}

/* RCU callback: finally drop the pid once all lockless lookups are done. */
static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

fastcall void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        hlist_del_rcu(&pid->pid_chain);
        spin_unlock_irqrestore(&pidmap_lock, flags);

        free_pidmap(pid->nr);
        call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(void)
{
        struct pid *pid;
        enum pid_type type;
        int nr = -1;

        pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
        if (!pid)
                goto out;

        nr = alloc_pidmap();
        if (nr < 0)
                goto out_free;

        atomic_set(&pid->count, 1);
        pid->nr = nr;
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        spin_lock_irq(&pidmap_lock);
        hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
        spin_unlock_irq(&pidmap_lock);

out:
        return pid;

out_free:
        kmem_cache_free(pid_cachep, pid);
        pid = NULL;
        goto out;
}

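/*
 * Lifecycle sketch, loosely modeled on the fork path; the helper name
 * below is hypothetical and not part of this file. A new pid is
 * allocated first, then attached once failure is no longer possible
 * (the real caller does the attach with the tasklist_lock write-held):
 *
 *      struct pid *example_assign_pid(struct task_struct *task)
 *      {
 *              struct pid *pid = alloc_pid();
 *
 *              if (!pid)
 *                      return NULL;
 *              attach_pid(task, PIDTYPE_PID, pid->nr);
 *              return pid;
 *      }
 */
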
/*
 * Look up a struct pid by number. Must be called under rcu_read_lock()
 * or with tasklist_lock read-held.
 */
struct pid * fastcall find_pid(int nr)
{
        struct hlist_node *elem;
        struct pid *pid;

        hlist_for_each_entry_rcu(pid, elem,
                        &pid_hash[pid_hashfn(nr)], pid_chain) {
                if (pid->nr == nr)
                        return pid;
        }
        return NULL;
}

/* attach_pid() must be called with the tasklist_lock write-held. */
int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
        struct pid_link *link;
        struct pid *pid;

        link = &task->pids[type];
        link->pid = pid = find_pid(nr);
        hlist_add_head_rcu(&link->node, &pid->tasks[type]);

        return 0;
}

void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid_link *link;
        struct pid *pid;
        int tmp;

        link = &task->pids[type];
        pid = link->pid;

        hlist_del_rcu(&link->node);
        link->pid = NULL;

        /* Free the pid once no task uses it under any type anymore. */
        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
                        return;

        free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        new->pids[type].pid = old->pids[type].pid;
        hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
        old->pids[type].pid = NULL;
}

/* Return the first task using @pid as a @type id, or NULL; needs RCU or tasklist_lock. */
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference(pid->tasks[type].first);
                if (first)
                        result = hlist_entry(first, struct task_struct, pids[(type)].node);
        }
        return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
        return pid_task(find_pid(nr), type);
}

EXPORT_SYMBOL(find_task_by_pid_type);

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_pid(nr));
        rcu_read_unlock();

        return pid;
}

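/*
 * Usage sketch (the helper below is hypothetical, not part of this
 * file): find_get_pid(), get_pid_task() and put_pid() combine into a
 * reference-safe lookup from a pid number to a task without holding
 * tasklist_lock; both helpers tolerate a NULL pid.
 *
 *      struct task_struct *example_task_from_nr(pid_t nr)
 *      {
 *              struct pid *pid = find_get_pid(nr);
 *              struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 *
 *              put_pid(pid);
 *              return task;    (drop with put_task_struct() when done)
 *      }
 */
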
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr)
{
        struct pid *pid;

        do {
                pid = find_pid(nr);
                if (pid)
                        break;
                nr = next_pidmap(nr);
        } while (nr > 0);

        return pid;
}

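/*
 * Iteration sketch (illustrative, in the style of the /proc pid
 * readdir code): every live pid can be walked in numeric order by
 * chaining find_ge_pid() calls. The caller must hold rcu_read_lock(),
 * as the returned pid is not reference-counted here.
 *
 *      int nr = 0;
 *      struct pid *pid;
 *
 *      while ((pid = find_ge_pid(nr)) != NULL) {
 *              ... use pid ...
 *              nr = pid->nr + 1;
 *      }
 */
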
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
        int i, pidhash_size;
        unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

        pidhash_shift = max(4, fls(megabytes * 4));
        pidhash_shift = min(12, pidhash_shift);
        pidhash_size = 1 << pidhash_shift;

        printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
                pidhash_size, pidhash_shift,
                pidhash_size * sizeof(struct hlist_head));

        pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
        if (!pid_hash)
                panic("Could not alloc pidhash!\n");
        for (i = 0; i < pidhash_size; i++)
                INIT_HLIST_HEAD(&pid_hash[i]);
}

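/*
 * Worked examples of the scaling above (illustrative): with 256 MB of
 * kernel pages, megabytes = 256 and fls(256 * 4) = fls(1024) = 11, so
 * the table gets 1 << 11 = 2048 slots; from one gigabyte upwards the
 * min() clamps the shift to 12, giving the 4096-slot maximum, and the
 * max(4, ...) floor keeps tiny machines at 16 slots.
 */
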
void __init pidmap_init(void)
{
        pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
        /* Reserve PID 0. We never call free_pidmap(0) */
        set_bit(0, pidmap_array->page);
        atomic_dec(&pidmap_array->nr_free);

        pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
                                        __alignof__(struct pid),
                                        SLAB_PANIC, NULL, NULL);
}