/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
struct fdtable_defer {
        spinlock_t lock;
        struct work_struct wq;
        struct fdtable *next;
};
int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */
/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays.  By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which avoids a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
static void *alloc_fdmem(size_t size)
{
        /*
         * Very large allocations can stress page reclaim, so fall back to
         * vmalloc() if the allocation size will be considered "large" by the VM.
         */
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
                if (data != NULL)
                        return data;
        }
        return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
        is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}
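/*
 * For scale (an illustration, not from the original source): with 4 KiB
 * pages and PAGE_ALLOC_COSTLY_ORDER == 3, the kmalloc() path above covers
 * allocations up to 32 KiB, i.e. fd arrays of up to 4096 slots on a
 * 64-bit kernel.  Anything bigger comes from vmalloc() and must be given
 * back with vfree(), which is why free_fdmem() checks is_vmalloc_addr().
 */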
static void __free_fdtable(struct fdtable *fdt)
{
        free_fdmem(fdt->fd);
        free_fdmem(fdt->open_fds);
        kfree(fdt);
}
static void free_fdtable_work(struct work_struct *work)
{
        struct fdtable_defer *f =
                container_of(work, struct fdtable_defer, wq);
        struct fdtable *fdt;

        spin_lock_bh(&f->lock);
        fdt = f->next;
        f->next = NULL;
        spin_unlock_bh(&f->lock);
        while (fdt) {
                struct fdtable *next = fdt->next;

                __free_fdtable(fdt);
                fdt = next;
        }
}
static void free_fdtable_rcu(struct rcu_head *rcu)
{
        struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
        struct fdtable_defer *fddef;

        BUG_ON(!fdt);

        if (fdt->max_fds <= NR_OPEN_DEFAULT) {
                /*
                 * This fdtable is embedded in the files structure and that
                 * structure itself is getting destroyed.
                 */
                kmem_cache_free(files_cachep,
                                container_of(fdt, struct files_struct, fdtab));
                return;
        }
        if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
                kfree(fdt->fd);
                kfree(fdt->open_fds);
                kfree(fdt);
        } else {
                fddef = &get_cpu_var(fdtable_defer_list);
                spin_lock(&fddef->lock);
                fdt->next = fddef->next;
                fddef->next = fdt;
                /* vmallocs are handled from the workqueue context */
                schedule_work(&fddef->wq);
                spin_unlock(&fddef->lock);
                put_cpu_var(fdtable_defer_list);
        }
}
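/*
 * Note on the workqueue detour above: RCU callbacks run in softirq
 * context, where vfree() must not be called.  kmalloc'ed tables can be
 * kfree'd directly in the callback, but vmalloc'ed ones are queued on
 * the per-cpu list and handed to free_fdtable_work(), which runs in
 * process context.
 */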
static inline void free_fdtable(struct fdtable *fdt)
{
        call_rcu(&fdt->rcu, free_fdtable_rcu);
}
/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
        unsigned int cpy, set;

        BUG_ON(nfdt->max_fds < ofdt->max_fds);

        cpy = ofdt->max_fds * sizeof(struct file *);
        set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
        memcpy(nfdt->fd, ofdt->fd, cpy);
        memset((char *)(nfdt->fd) + cpy, 0, set);

        cpy = ofdt->max_fds / BITS_PER_BYTE;
        set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
        memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
        memset((char *)(nfdt->open_fds) + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}
static struct fdtable * alloc_fdtable(unsigned int nr)
{
        struct fdtable *fdt;
        void *data;

        /*
         * Figure out how many fds we actually want to support in this fdtable.
         * Allocation steps are keyed to the size of the fdarray, since it
         * grows far faster than any of the other dynamic data. We try to fit
         * the fdarray into comfortable page-tuned chunks: starting at 1024B
         * and growing in powers of two from there on.
         */
        nr /= (1024 / sizeof(struct file *));
        nr = roundup_pow_of_two(nr + 1);
        nr *= (1024 / sizeof(struct file *));
        /*
         * Note that this can drive nr *below* what we had passed if sysctl_nr_open
         * had been set lower between the check in expand_files() and here.  Deal
         * with that in the caller, it's cheaper that way.
         *
         * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
         * bitmaps handling below becomes unpleasant, to put it mildly...
         */
        if (unlikely(nr > sysctl_nr_open))
                nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

        fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
        if (!fdt)
                goto out;
        fdt->max_fds = nr;
        data = alloc_fdmem(nr * sizeof(struct file *));
        if (!data)
                goto out_fdt;
        fdt->fd = data;
        /* one buffer holds both bitmaps: open_fds first, close_on_exec after */
        data = alloc_fdmem(max_t(size_t,
                                 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
        if (!data)
                goto out_arr;
        fdt->open_fds = data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = data;
        fdt->next = NULL;

        return fdt;

out_arr:
        free_fdmem(fdt->fd);
out_fdt:
        kfree(fdt);
out:
        return NULL;
}
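/*
 * Worked example of the sizing above (illustrative): on a 64-bit kernel
 * sizeof(struct file *) == 8, so the scale factor is 1024 / 8 == 128.
 * A request for nr == 300 becomes 300/128 == 2, roundup_pow_of_two(3) == 4,
 * then 4 * 128 == 512 slots -- a 4096-byte fd array, exactly one page.
 * The sysctl clamp keeps nr a multiple of BITS_PER_LONG: with
 * sysctl_nr_open == 100 it yields ((100 - 1) | 63) + 1 == 128.
 */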
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);
        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * extremely unlikely race - sysctl_nr_open decreased between the check in
         * caller and alloc_fdtable().  Cheaper to catch it here...
         */
        if (unlikely(new_fdt->max_fds <= nr)) {
                __free_fdtable(new_fdt);
                return -EMFILE;
        }
        /*
         * Check again since another task may have expanded the fd table while
         * we dropped the lock
         */
        cur_fdt = files_fdtable(files);
        if (nr >= cur_fdt->max_fds) {
                /* Continue as planned */
                copy_fdtable(new_fdt, cur_fdt);
                rcu_assign_pointer(files->fdt, new_fdt);
                if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
                        free_fdtable(cur_fdt);
        } else {
                /* Somebody else expanded, so undo our attempt */
                __free_fdtable(new_fdt);
        }
        return 1;
}
/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
        struct fdtable *fdt;

        fdt = files_fdtable(files);

        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return 0;

        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;

        /* All good, so we try */
        return expand_fdtable(files, nr);
}
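/*
 * Callers must treat a return of 1 as "file_lock was dropped and retaken,
 * the cached fdtable pointer may be stale" and redo their lookup; the
 * repeat: loop in __alloc_fd() below is the canonical example.
 */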
static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size / BITS_PER_LONG; i > 0; ) {
                if (fdt->open_fds[--i])
                        break;
        }
        i = (i + 1) * BITS_PER_LONG;
        return i;
}
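/*
 * Note the result is rounded up to a BITS_PER_LONG multiple, not an exact
 * count.  For example, with max_fds == 256 and the highest open fd at 70,
 * the scan stops at bitmap word 1 and the function returns
 * (1 + 1) * 64 == 128 on a 64-bit kernel -- enough slots to cover every
 * open descriptor.
 */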
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = newf->close_on_exec_init;
        new_fdt->open_fds = newf->open_fds_init;
        new_fdt->fd = &newf->fd_array[0];
        new_fdt->next = NULL;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         */
        while (unlikely(open_files > new_fdt->max_fds)) {
                spin_unlock(&oldf->file_lock);

                if (new_fdt != &newf->fdtab)
                        __free_fdtable(new_fdt);

                new_fdt = alloc_fdtable(open_files - 1);
                if (!new_fdt) {
                        *errorp = -ENOMEM;
                        goto out_release;
                }

                /* beyond sysctl_nr_open; nothing to do */
                if (unlikely(new_fdt->max_fds < open_files)) {
                        __free_fdtable(new_fdt);
                        *errorp = -EMFILE;
                        goto out_release;
                }

                /*
                 * Reacquire the oldf lock and a pointer to its fd table;
                 * it may have a new, bigger fd table by now, and we need
                 * the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
                open_files = count_open_files(old_fdt);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
        memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        __clear_open_fd(open_files - i, new_fdt);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long-word aligned, so we could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds - open_files) / 8;
                int start = open_files / BITS_PER_LONG;

                memset(&new_fdt->open_fds[start], 0, left);
                memset(&new_fdt->close_on_exec[start], 0, left);
        }

        rcu_assign_pointer(newf->fdt, new_fdt);

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}
static void close_files(struct files_struct * files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.  But use RCU to shut RCU-lockdep up.
         */
        rcu_read_lock();
        fdt = files_fdtable(files);
        rcu_read_unlock();
        for (;;) {
                unsigned long set;
                i = j * BITS_PER_LONG;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }
}
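/*
 * For example, if bitmap word j holds set == 0x0b (binary 1011), the
 * inner loop above closes fds i, i + 1 and i + 3, shifting set right
 * once per slot and skipping the clear bit at i + 2.
 */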
struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}
void put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
                rcu_read_unlock();
        }
}
void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}
void exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}
static void __devinit fdtable_defer_list_init(int cpu)
{
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
        spin_lock_init(&fddef->lock);
        INIT_WORK(&fddef->wq, free_fdtable_work);
        fddef->next = NULL;
}
void __init files_defer_init(void)
{
        int i;
        for_each_possible_cpu(i)
                fdtable_defer_list_init(i);
        sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
                             -BITS_PER_LONG;
}
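/*
 * On a 64-bit kernel ~(size_t)0 / sizeof(void *) far exceeds INT_MAX, so
 * the min() above picks INT_MAX, and the & -BITS_PER_LONG rounds it down
 * to a multiple of 64: sysctl_nr_open_max == 0x7fffffc0 == 2147483584.
 */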
struct files_struct init_files = {
        .count          = ATOMIC_INIT(1),
        .fdt            = &init_files.fdtab,
        .fdtab          = {
                .max_fds        = NR_OPEN_DEFAULT,
                .fd             = &init_files.fd_array[0],
                .close_on_exec  = init_files.close_on_exec_init,
                .open_fds       = init_files.open_fds_init,
        },
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
               unsigned start, unsigned end, unsigned flags)
{
        unsigned int fd;
        int error;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
repeat:
        fdt = files_fdtable(files);
        fd = start;
        if (fd < files->next_fd)
                fd = files->next_fd;

        if (fd < fdt->max_fds)
                fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

        /*
         * N.B. For clone tasks sharing a files structure, this test
         * will limit the total number of files that can be opened.
         */
        error = -EMFILE;
        if (fd >= end)
                goto out;

        error = expand_files(files, fd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = fd + 1;

        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        error = fd;
#if 1
        /* Sanity check */
        if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
#endif

out:
        spin_unlock(&files->file_lock);
        return error;
}
int alloc_fd(unsigned start, unsigned flags)
{
        return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
        return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);
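/*
 * Sketch of the usual calling pattern elsewhere in the kernel (an
 * illustration, not part of this file): reserve the descriptor first and
 * bind it with fd_install() only once nothing else can fail, since
 * fd_install() makes the file visible to userspace immediately.
 *
 *      int fd = get_unused_fd_flags(O_CLOEXEC);
 *      if (fd < 0)
 *              return fd;
 *      file = ...;                     // e.g. anon_inode_getfile()
 *      if (IS_ERR(file)) {
 *              put_unused_fd(fd);
 *              return PTR_ERR(file);
 *      }
 *      fd_install(fd, file);
 *      return fd;
 */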