don't bugger nd->seq on set_root_rcu() from follow_dotdot_rcu()
1/*
2 * linux/fs/namei.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * Some corrections by tytso.
9 */
10
11/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
12 * lookup logic.
13 */
14/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
15 */
16
17#include <linux/init.h>
18#include <linux/export.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/fs.h>
22#include <linux/namei.h>
23#include <linux/pagemap.h>
24#include <linux/fsnotify.h>
25#include <linux/personality.h>
26#include <linux/security.h>
27#include <linux/ima.h>
28#include <linux/syscalls.h>
29#include <linux/mount.h>
30#include <linux/audit.h>
31#include <linux/capability.h>
32#include <linux/file.h>
33#include <linux/fcntl.h>
34#include <linux/device_cgroup.h>
35#include <linux/fs_struct.h>
36#include <linux/posix_acl.h>
37#include <asm/uaccess.h>
38
39#include "internal.h"
40#include "mount.h"
41
42/* [Feb-1997 T. Schoebel-Theuer]
43 * Fundamental changes in the pathname lookup mechanisms (namei)
44 * were necessary because of omirr. The reason is that omirr needs
45 * to know the _real_ pathname, not the user-supplied one, in case
46 * of symlinks (and also when transname replacements occur).
47 *
48 * The new code replaces the old recursive symlink resolution with
49 * an iterative one (in case of non-nested symlink chains). It does
50 * this with calls to <fs>_follow_link().
51 * As a side effect, dir_namei(), _namei() and follow_link() are now
52 * replaced with a single function lookup_dentry() that can handle all
53 * the special cases of the former code.
54 *
55 * With the new dcache, the pathname is stored at each inode, at least as
56 * long as the refcount of the inode is positive. As a side effect, the
57 * size of the dcache depends on the inode cache and thus is dynamic.
58 *
59 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
60 * resolution to correspond with current state of the code.
61 *
62 * Note that the symlink resolution is not *completely* iterative.
63 * There is still a significant amount of tail- and mid- recursion in
64 * the algorithm. Also, note that <fs>_readlink() is not used in
65 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
66 * may return different results than <fs>_follow_link(). Many virtual
67 * filesystems (including /proc) exhibit this behavior.
68 */
69
70/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
71 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
72 * and the name already exists in form of a symlink, try to create the new
73 * name indicated by the symlink. The old code always complained that the
74 * name already exists, due to not following the symlink even if its target
75 * is nonexistent. The new semantics affects also mknod() and link() when
76 * the name is a symlink pointing to a non-existent name.
77 *
78 * I don't know which semantics is the right one, since I have no access
79 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
80 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
81 * "old" one. Personally, I think the new semantics is much more logical.
82 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
83 * file does succeed in both HP-UX and SunOS, but not in Solaris
84 * or in the old Linux semantics.
85 */
86
87/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
88 * semantics. See the comments in "open_namei" and "do_link" below.
89 *
90 * [10-Sep-98 Alan Modra] Another symlink change.
91 */
92
93/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
94 * inside the path - always follow.
95 * in the last component in creation/removal/renaming - never follow.
96 * if LOOKUP_FOLLOW passed - follow.
97 * if the pathname has trailing slashes - follow.
98 * otherwise - don't follow.
99 * (applied in that order).
100 *
101 * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
102 * restored for 2.4. This is the last surviving part of old 4.2BSD bug.
103 * During the 2.4 we need to fix the userland stuff depending on it -
104 * hopefully we will be able to get rid of that wart in 2.5. So far only
105 * XEmacs seems to be relying on it...
106 */
107/*
108 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
109 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
110 * any extra contention...
111 */
112
113/* In order to reduce some races, while at the same time doing additional
114 * checking and hopefully speeding things up, we copy filenames to the
115 * kernel data space before using them.
116 *
117 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
118 * PATH_MAX includes the nul terminator --RR.
119 */
120void final_putname(struct filename *name)
121{
122 if (name->separate) {
123 __putname(name->name);
124 kfree(name);
125 } else {
126 __putname(name);
127 }
128}
129
130#define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename))
131
132static struct filename *
133getname_flags(const char __user *filename, int flags, int *empty)
134{
135 struct filename *result, *err;
136 int len;
137 long max;
138 char *kname;
139
140 result = audit_reusename(filename);
141 if (result)
142 return result;
143
144 result = __getname();
145 if (unlikely(!result))
146 return ERR_PTR(-ENOMEM);
147
148 /*
149 * First, try to embed the struct filename inside the names_cache
150 * allocation
151 */
152 kname = (char *)result + sizeof(*result);
153 result->name = kname;
154 result->separate = false;
155 max = EMBEDDED_NAME_MAX;
156
157recopy:
158 len = strncpy_from_user(kname, filename, max);
159 if (unlikely(len < 0)) {
160 err = ERR_PTR(len);
161 goto error;
162 }
163
164 /*
165 * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
166 * separate struct filename so we can dedicate the entire
167 * names_cache allocation for the pathname, and re-do the copy from
168 * userland.
169 */
170 if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) {
171 kname = (char *)result;
172
173 result = kzalloc(sizeof(*result), GFP_KERNEL);
174 if (!result) {
175 err = ERR_PTR(-ENOMEM);
176 result = (struct filename *)kname;
177 goto error;
178 }
179 result->name = kname;
180 result->separate = true;
181 max = PATH_MAX;
182 goto recopy;
183 }
184
185 /* The empty path is special. */
186 if (unlikely(!len)) {
187 if (empty)
188 *empty = 1;
189 err = ERR_PTR(-ENOENT);
190 if (!(flags & LOOKUP_EMPTY))
191 goto error;
192 }
193
194 err = ERR_PTR(-ENAMETOOLONG);
195 if (unlikely(len >= PATH_MAX))
196 goto error;
197
198 result->uptr = filename;
199 result->aname = NULL;
200 audit_getname(result);
201 return result;
202
203error:
204 final_putname(result);
205 return err;
206}
207
208struct filename *
209getname(const char __user * filename)
210{
211 return getname_flags(filename, 0, NULL);
212}
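/*
 * Illustrative sketch (not part of the original file): the usual
 * getname()/putname() pairing as a syscall might use it.  The
 * SYSCALL_DEFINE1 wrapper is real, but "example" and
 * do_something_with() are made-up placeholders.
 */
#if 0
SYSCALL_DEFINE1(example, const char __user *, pathname)
{
	struct filename *name;
	int error;

	name = getname(pathname);	/* copy the string into kernel space */
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = do_something_with(name->name);

	putname(name);			/* release the names_cache allocation */
	return error;
}
#endif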
213
214/*
215 * The "getname_kernel()" interface doesn't do pathnames longer
216 * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user.
217 */
218struct filename *
219getname_kernel(const char * filename)
220{
221 struct filename *result;
222 char *kname;
223 int len;
224
225 len = strlen(filename);
226 if (len >= EMBEDDED_NAME_MAX)
227 return ERR_PTR(-ENAMETOOLONG);
228
229 result = __getname();
230 if (unlikely(!result))
231 return ERR_PTR(-ENOMEM);
232
233 kname = (char *)result + sizeof(*result);
234 result->name = kname;
235 result->uptr = NULL;
236 result->aname = NULL;
237 result->separate = false;
238
239 strlcpy(kname, filename, EMBEDDED_NAME_MAX);
240 return result;
241}
242
243#ifdef CONFIG_AUDITSYSCALL
244void putname(struct filename *name)
245{
246 if (unlikely(!audit_dummy_context()))
247 return audit_putname(name);
248 final_putname(name);
249}
250#endif
251
252static int check_acl(struct inode *inode, int mask)
253{
254#ifdef CONFIG_FS_POSIX_ACL
255 struct posix_acl *acl;
256
257 if (mask & MAY_NOT_BLOCK) {
258 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
259 if (!acl)
260 return -EAGAIN;
261 /* no ->get_acl() calls in RCU mode... */
262 if (acl == ACL_NOT_CACHED)
263 return -ECHILD;
264 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
265 }
266
267 acl = get_acl(inode, ACL_TYPE_ACCESS);
268 if (IS_ERR(acl))
269 return PTR_ERR(acl);
270 if (acl) {
271 int error = posix_acl_permission(inode, acl, mask);
272 posix_acl_release(acl);
273 return error;
274 }
275#endif
276
277 return -EAGAIN;
278}
279
280/*
281 * This does the basic permission checking
282 */
283static int acl_permission_check(struct inode *inode, int mask)
284{
285 unsigned int mode = inode->i_mode;
286
287 if (likely(uid_eq(current_fsuid(), inode->i_uid)))
288 mode >>= 6;
289 else {
290 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
291 int error = check_acl(inode, mask);
292 if (error != -EAGAIN)
293 return error;
294 }
295
296 if (in_group_p(inode->i_gid))
297 mode >>= 3;
298 }
299
300 /*
301 * If the DACs are ok we don't need any capability check.
302 */
303 if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
304 return 0;
305 return -EACCES;
306}
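/*
 * Worked example (added for illustration, not in the original file):
 * take an inode with i_mode 0640 owned by uid 1000, gid 100, and assume
 * no POSIX ACL is present.  acl_permission_check() then behaves as:
 *
 *   fsuid == 1000:                mode >>= 6 leaves rw-, so MAY_READ and
 *                                 MAY_WRITE succeed, MAY_EXEC -> -EACCES.
 *   fsuid != 1000, in group 100:  mode >>= 3 leaves r--, only MAY_READ
 *                                 succeeds.
 *   neither:                      the "other" bits (---) apply, every
 *                                 mask fails and generic_permission()
 *                                 falls back to capability checks.
 */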
307
308/**
309 * generic_permission - check for access rights on a Posix-like filesystem
310 * @inode: inode to check access rights for
311 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
312 *
313 * Used to check for read/write/execute permissions on a file.
314 * We use "fsuid" for this, letting us set arbitrary permissions
315 * for filesystem access without changing the "normal" uids which
316 * are used for other things.
317 *
318 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
319 * request cannot be satisfied (eg. requires blocking or too much complexity).
320 * It would then be called again in ref-walk mode.
321 */
322int generic_permission(struct inode *inode, int mask)
323{
324 int ret;
325
326 /*
327 * Do the basic permission checks.
328 */
329 ret = acl_permission_check(inode, mask);
330 if (ret != -EACCES)
331 return ret;
332
333 if (S_ISDIR(inode->i_mode)) {
334 /* DACs are overridable for directories */
335 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
336 return 0;
337 if (!(mask & MAY_WRITE))
338 if (capable_wrt_inode_uidgid(inode,
339 CAP_DAC_READ_SEARCH))
340 return 0;
341 return -EACCES;
342 }
343 /*
344 * Read/write DACs are always overridable.
345 * Executable DACs are overridable when there is
346 * at least one exec bit set.
347 */
348 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
349 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
350 return 0;
351
352 /*
353 * Searching includes executable on directories, else just read.
354 */
355 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
356 if (mask == MAY_READ)
357 if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
358 return 0;
359
360 return -EACCES;
361}
362EXPORT_SYMBOL(generic_permission);
363
364/*
365 * We _really_ want to just do "generic_permission()" without
366 * even looking at the inode->i_op values. So we keep a cache
367 * flag in inode->i_opflags, that says "this has no special
368 * permission function, use the fast case".
369 */
370static inline int do_inode_permission(struct inode *inode, int mask)
371{
372 if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
373 if (likely(inode->i_op->permission))
374 return inode->i_op->permission(inode, mask);
375
376 /* This gets set once for the inode lifetime */
377 spin_lock(&inode->i_lock);
378 inode->i_opflags |= IOP_FASTPERM;
379 spin_unlock(&inode->i_lock);
380 }
381 return generic_permission(inode, mask);
382}
383
384/**
385 * __inode_permission - Check for access rights to a given inode
386 * @inode: Inode to check permission on
387 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
388 *
389 * Check for read/write/execute permissions on an inode.
390 *
391 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
392 *
393 * This does not check for a read-only file system. You probably want
394 * inode_permission().
395 */
396int __inode_permission(struct inode *inode, int mask)
397{
398 int retval;
399
400 if (unlikely(mask & MAY_WRITE)) {
401 /*
402 * Nobody gets write access to an immutable file.
403 */
404 if (IS_IMMUTABLE(inode))
405 return -EACCES;
406 }
407
408 retval = do_inode_permission(inode, mask);
409 if (retval)
410 return retval;
411
412 retval = devcgroup_inode_permission(inode, mask);
413 if (retval)
414 return retval;
415
416 return security_inode_permission(inode, mask);
417}
418
419/**
420 * sb_permission - Check superblock-level permissions
421 * @sb: Superblock of inode to check permission on
422 * @inode: Inode to check permission on
423 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
424 *
425 * Separate out file-system wide checks from inode-specific permission checks.
426 */
427static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
428{
429 if (unlikely(mask & MAY_WRITE)) {
430 umode_t mode = inode->i_mode;
431
432 /* Nobody gets write access to a read-only fs. */
433 if ((sb->s_flags & MS_RDONLY) &&
434 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
435 return -EROFS;
436 }
437 return 0;
438}
439
440/**
441 * inode_permission - Check for access rights to a given inode
442 * @inode: Inode to check permission on
443 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
444 *
445 * Check for read/write/execute permissions on an inode. We use fs[ug]id for
446 * this, letting us set arbitrary permissions for filesystem access without
447 * changing the "normal" UIDs which are used for other things.
448 *
449 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
450 */
451int inode_permission(struct inode *inode, int mask)
452{
453 int retval;
454
455 retval = sb_permission(inode->i_sb, inode, mask);
456 if (retval)
457 return retval;
458 return __inode_permission(inode, mask);
459}
460EXPORT_SYMBOL(inode_permission);
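/*
 * Illustrative sketch (not part of the original file): the common way
 * VFS helpers use inode_permission() before modifying a directory,
 * e.g. ahead of creating an entry in it.  "dir" is a hypothetical
 * struct inode * of the parent directory.
 */
#if 0
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;	/* -EACCES, -EROFS, ... */
#endif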
461
462/**
463 * path_get - get a reference to a path
464 * @path: path to get the reference to
465 *
466 * Given a path increment the reference count to the dentry and the vfsmount.
467 */
468void path_get(const struct path *path)
469{
470 mntget(path->mnt);
471 dget(path->dentry);
472}
473EXPORT_SYMBOL(path_get);
474
475/**
476 * path_put - put a reference to a path
477 * @path: path to put the reference to
478 *
479 * Given a path decrement the reference count to the dentry and the vfsmount.
480 */
481void path_put(const struct path *path)
482{
483 dput(path->dentry);
484 mntput(path->mnt);
485}
486EXPORT_SYMBOL(path_put);
487
488/*
489 * Path walking has 2 modes, rcu-walk and ref-walk (see
490 * Documentation/filesystems/path-lookup.txt). In situations when we can't
491 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
492 * normal reference counts on dentries and vfsmounts to transition to rcu-walk
493 * mode. Refcounts are grabbed at the last known good point before rcu-walk
494 * got stuck, so ref-walk may continue from there. If this is not successful
495 * (eg. a seqcount has changed), then failure is returned and it's up to caller
496 * to restart the path walk from the beginning in ref-walk mode.
497 */
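/*
 * Illustrative sketch (not part of the original file): the restart
 * pattern described above, as filename_lookup() further down actually
 * implements it.  An rcu-walk attempt is tried first; -ECHILD means
 * "could not stay lockless, redo the whole walk in ref-walk mode".
 */
#if 0
	retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
	if (retval == -ECHILD)
		retval = path_lookupat(dfd, name, flags, nd);
#endif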
498
499/**
500 * unlazy_walk - try to switch to ref-walk mode.
501 * @nd: nameidata pathwalk data
502 * @dentry: child of nd->path.dentry or NULL
503 * Returns: 0 on success, -ECHILD on failure
504 *
505 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
506 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
507 * @nd or NULL. Must be called from rcu-walk context.
508 */
509static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
510{
511 struct fs_struct *fs = current->fs;
512 struct dentry *parent = nd->path.dentry;
513
514 BUG_ON(!(nd->flags & LOOKUP_RCU));
515
516 /*
517 * After legitimizing the bastards, terminate_walk()
518 * will do the right thing for non-RCU mode, and all our
519 * subsequent exit cases should rcu_read_unlock()
520 * before returning. Do vfsmount first; if dentry
521 * can't be legitimized, just set nd->path.dentry to NULL
522 * and rely on dput(NULL) being a no-op.
523 */
524 if (!legitimize_mnt(nd->path.mnt, nd->m_seq))
525 return -ECHILD;
526 nd->flags &= ~LOOKUP_RCU;
527
528 if (!lockref_get_not_dead(&parent->d_lockref)) {
529 nd->path.dentry = NULL;
530 goto out;
531 }
532
533 /*
534 * For a negative lookup, the lookup sequence point is the parent's
535 * sequence point, and it only needs to revalidate the parent dentry.
536 *
537 * For a positive lookup, we need to move both the parent and the
538 * dentry from the RCU domain to be properly refcounted. And the
539 * sequence number in the dentry validates *both* dentry counters,
540 * since we checked the sequence number of the parent after we got
541 * the child sequence number. So we know the parent must still
542 * be valid if the child sequence number is still valid.
543 */
544 if (!dentry) {
545 if (read_seqcount_retry(&parent->d_seq, nd->seq))
546 goto out;
547 BUG_ON(nd->inode != parent->d_inode);
548 } else {
549 if (!lockref_get_not_dead(&dentry->d_lockref))
550 goto out;
551 if (read_seqcount_retry(&dentry->d_seq, nd->seq))
552 goto drop_dentry;
553 }
554
555 /*
556 * Sequence counts matched. Now make sure that the root is
557 * still valid and get it if required.
558 */
559 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
560 spin_lock(&fs->lock);
561 if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry)
562 goto unlock_and_drop_dentry;
563 path_get(&nd->root);
564 spin_unlock(&fs->lock);
565 }
566
567 rcu_read_unlock();
568 return 0;
569
570unlock_and_drop_dentry:
571 spin_unlock(&fs->lock);
572drop_dentry:
573 rcu_read_unlock();
574 dput(dentry);
575 goto drop_root_mnt;
576out:
577 rcu_read_unlock();
578drop_root_mnt:
579 if (!(nd->flags & LOOKUP_ROOT))
580 nd->root.mnt = NULL;
581 return -ECHILD;
582}
583
584static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
585{
586 return dentry->d_op->d_revalidate(dentry, flags);
587}
588
589/**
590 * complete_walk - successful completion of path walk
591 * @nd: pointer to nameidata
592 *
593 * If we had been in RCU mode, drop out of it and legitimize nd->path.
594 * Revalidate the final result, unless we'd already done that during
595 * the path walk or the filesystem doesn't ask for it. Return 0 on
596 * success, -error on failure. In case of failure caller does not
597 * need to drop nd->path.
598 */
599static int complete_walk(struct nameidata *nd)
600{
601 struct dentry *dentry = nd->path.dentry;
602 int status;
603
604 if (nd->flags & LOOKUP_RCU) {
605 nd->flags &= ~LOOKUP_RCU;
606 if (!(nd->flags & LOOKUP_ROOT))
607 nd->root.mnt = NULL;
608
609 if (!legitimize_mnt(nd->path.mnt, nd->m_seq)) {
610 rcu_read_unlock();
611 return -ECHILD;
612 }
613 if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) {
614 rcu_read_unlock();
615 mntput(nd->path.mnt);
616 return -ECHILD;
617 }
618 if (read_seqcount_retry(&dentry->d_seq, nd->seq)) {
619 rcu_read_unlock();
620 dput(dentry);
621 mntput(nd->path.mnt);
622 return -ECHILD;
623 }
624 rcu_read_unlock();
625 }
626
627 if (likely(!(nd->flags & LOOKUP_JUMPED)))
628 return 0;
629
630 if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
631 return 0;
632
633 status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
634 if (status > 0)
635 return 0;
636
637 if (!status)
638 status = -ESTALE;
639
640 path_put(&nd->path);
641 return status;
642}
643
644static __always_inline void set_root(struct nameidata *nd)
645{
646 get_fs_root(current->fs, &nd->root);
647}
648
649static int link_path_walk(const char *, struct nameidata *);
650
651static __always_inline unsigned set_root_rcu(struct nameidata *nd)
652{
653 struct fs_struct *fs = current->fs;
654 unsigned seq, res;
655
656 do {
657 seq = read_seqcount_begin(&fs->seq);
658 nd->root = fs->root;
659 res = __read_seqcount_begin(&nd->root.dentry->d_seq);
660 } while (read_seqcount_retry(&fs->seq, seq));
661 return res;
662}
663
664static void path_put_conditional(struct path *path, struct nameidata *nd)
665{
666 dput(path->dentry);
667 if (path->mnt != nd->path.mnt)
668 mntput(path->mnt);
669}
670
671static inline void path_to_nameidata(const struct path *path,
672 struct nameidata *nd)
673{
674 if (!(nd->flags & LOOKUP_RCU)) {
675 dput(nd->path.dentry);
676 if (nd->path.mnt != path->mnt)
677 mntput(nd->path.mnt);
678 }
679 nd->path.mnt = path->mnt;
680 nd->path.dentry = path->dentry;
681}
682
683/*
684 * Helper to directly jump to a known parsed path from ->follow_link,
685 * caller must have taken a reference to path beforehand.
686 */
687void nd_jump_link(struct nameidata *nd, struct path *path)
688{
689 path_put(&nd->path);
690
691 nd->path = *path;
692 nd->inode = nd->path.dentry->d_inode;
693 nd->flags |= LOOKUP_JUMPED;
694}
695
696static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
697{
698 struct inode *inode = link->dentry->d_inode;
699 if (inode->i_op->put_link)
700 inode->i_op->put_link(link->dentry, nd, cookie);
701 path_put(link);
702}
703
704int sysctl_protected_symlinks __read_mostly = 0;
705int sysctl_protected_hardlinks __read_mostly = 0;
706
707/**
708 * may_follow_link - Check symlink following for unsafe situations
709 * @link: The path of the symlink
710 * @nd: nameidata pathwalk data
711 *
712 * In the case of the sysctl_protected_symlinks sysctl being enabled,
713 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
714 * in a sticky world-writable directory. This is to protect privileged
715 * processes from failing races against path names that may change out
716 * from under them by way of other users creating malicious symlinks.
717 * It will permit symlinks to be followed only when outside a sticky
718 * world-writable directory, or when the uid of the symlink and follower
719 * match, or when the directory owner matches the symlink's owner.
720 *
721 * Returns 0 if following the symlink is allowed, -ve on error.
722 */
723static inline int may_follow_link(struct path *link, struct nameidata *nd)
724{
725 const struct inode *inode;
726 const struct inode *parent;
727
728 if (!sysctl_protected_symlinks)
729 return 0;
730
731 /* Allowed if owner and follower match. */
732 inode = link->dentry->d_inode;
733 if (uid_eq(current_cred()->fsuid, inode->i_uid))
734 return 0;
735
736 /* Allowed if parent directory not sticky and world-writable. */
737 parent = nd->path.dentry->d_inode;
738 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
739 return 0;
740
741 /* Allowed if parent directory and link owner match. */
742 if (uid_eq(parent->i_uid, inode->i_uid))
743 return 0;
744
745 audit_log_link_denied("follow_link", link);
746 path_put_conditional(link, nd);
747 path_put(&nd->path);
748 return -EACCES;
749}
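/*
 * Example of the policy above (added for illustration): with
 * fs.protected_symlinks = 1, following /tmp/evil -> /etc/shadow fails
 * with -EACCES for any process whose fsuid differs from the owner of
 * the link (even a root-owned daemon), because /tmp is sticky and
 * world-writable and its owner (root) does not match the link's owner.
 */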
750
751/**
752 * safe_hardlink_source - Check for safe hardlink conditions
753 * @inode: the source inode to hardlink from
754 *
755 * Return false if at least one of the following conditions holds:
756 * - inode is not a regular file
757 * - inode is setuid
758 * - inode is setgid and group-exec
759 * - access failure for read and write
760 *
761 * Otherwise returns true.
762 */
763static bool safe_hardlink_source(struct inode *inode)
764{
765 umode_t mode = inode->i_mode;
766
767 /* Special files should not get pinned to the filesystem. */
768 if (!S_ISREG(mode))
769 return false;
770
771 /* Setuid files should not get pinned to the filesystem. */
772 if (mode & S_ISUID)
773 return false;
774
775 /* Executable setgid files should not get pinned to the filesystem. */
776 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
777 return false;
778
779 /* Hardlinking to unreadable or unwritable sources is dangerous. */
780 if (inode_permission(inode, MAY_READ | MAY_WRITE))
781 return false;
782
783 return true;
784}
785
786/**
787 * may_linkat - Check permissions for creating a hardlink
788 * @link: the source to hardlink from
789 *
790 * Block hardlink when all of:
791 * - sysctl_protected_hardlinks enabled
792 * - fsuid does not match inode
793 * - hardlink source is unsafe (see safe_hardlink_source() above)
794 * - not CAP_FOWNER
795 *
796 * Returns 0 if successful, -ve on error.
797 */
798static int may_linkat(struct path *link)
799{
800 const struct cred *cred;
801 struct inode *inode;
802
803 if (!sysctl_protected_hardlinks)
804 return 0;
805
806 cred = current_cred();
807 inode = link->dentry->d_inode;
808
809 /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
810 * otherwise, it must be a safe source.
811 */
812 if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
813 capable(CAP_FOWNER))
814 return 0;
815
816 audit_log_link_denied("linkat", link);
817 return -EPERM;
818}
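/*
 * Example of the policy above (added for illustration): with
 * fs.protected_hardlinks = 1, an unprivileged user running
 * "ln /bin/su ./mysu" gets EPERM: they do not own the source inode, the
 * source is setuid (so not a safe_hardlink_source()), and they lack
 * CAP_FOWNER, so the attempt is also logged via audit_log_link_denied().
 */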
819
820static __always_inline int
821follow_link(struct path *link, struct nameidata *nd, void **p)
822{
823 struct dentry *dentry = link->dentry;
824 int error;
825 char *s;
826
827 BUG_ON(nd->flags & LOOKUP_RCU);
828
829 if (link->mnt == nd->path.mnt)
830 mntget(link->mnt);
831
832 error = -ELOOP;
833 if (unlikely(current->total_link_count >= 40))
834 goto out_put_nd_path;
835
836 cond_resched();
837 current->total_link_count++;
838
839 touch_atime(link);
840 nd_set_link(nd, NULL);
841
842 error = security_inode_follow_link(link->dentry, nd);
843 if (error)
844 goto out_put_nd_path;
845
846 nd->last_type = LAST_BIND;
847 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
848 error = PTR_ERR(*p);
849 if (IS_ERR(*p))
850 goto out_put_nd_path;
851
852 error = 0;
853 s = nd_get_link(nd);
854 if (s) {
855 if (unlikely(IS_ERR(s))) {
856 path_put(&nd->path);
857 put_link(nd, link, *p);
858 return PTR_ERR(s);
859 }
860 if (*s == '/') {
861 if (!nd->root.mnt)
862 set_root(nd);
863 path_put(&nd->path);
864 nd->path = nd->root;
865 path_get(&nd->root);
866 nd->flags |= LOOKUP_JUMPED;
867 }
868 nd->inode = nd->path.dentry->d_inode;
869 error = link_path_walk(s, nd);
870 if (unlikely(error))
871 put_link(nd, link, *p);
872 }
873
874 return error;
875
876out_put_nd_path:
877 *p = NULL;
878 path_put(&nd->path);
879 path_put(link);
880 return error;
881}
882
883static int follow_up_rcu(struct path *path)
884{
885 struct mount *mnt = real_mount(path->mnt);
886 struct mount *parent;
887 struct dentry *mountpoint;
888
889 parent = mnt->mnt_parent;
890 if (&parent->mnt == path->mnt)
891 return 0;
892 mountpoint = mnt->mnt_mountpoint;
893 path->dentry = mountpoint;
894 path->mnt = &parent->mnt;
895 return 1;
896}
897
898/*
899 * follow_up - Find the mountpoint of path's vfsmount
900 *
901 * Given a path, find the mountpoint of its source file system.
902 * Replace @path with the path of the mountpoint in the parent mount.
903 * Up is towards /.
904 *
905 * Return 1 if we went up a level and 0 if we were already at the
906 * root.
907 */
908int follow_up(struct path *path)
909{
910 struct mount *mnt = real_mount(path->mnt);
911 struct mount *parent;
912 struct dentry *mountpoint;
913
914 read_seqlock_excl(&mount_lock);
915 parent = mnt->mnt_parent;
916 if (parent == mnt) {
917 read_sequnlock_excl(&mount_lock);
918 return 0;
919 }
920 mntget(&parent->mnt);
921 mountpoint = dget(mnt->mnt_mountpoint);
922 read_sequnlock_excl(&mount_lock);
923 dput(path->dentry);
924 path->dentry = mountpoint;
925 mntput(path->mnt);
926 path->mnt = &parent->mnt;
927 return 1;
928}
929EXPORT_SYMBOL(follow_up);
930
931/*
932 * Perform an automount
933 * - return -EISDIR to tell follow_managed() to stop and return the path we
934 * were called with.
935 */
936static int follow_automount(struct path *path, unsigned flags,
937 bool *need_mntput)
938{
939 struct vfsmount *mnt;
940 int err;
941
942 if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
943 return -EREMOTE;
944
945 /* We don't want to mount if someone's just doing a stat -
946 * unless they're stat'ing a directory and appended a '/' to
947 * the name.
948 *
949 * We do, however, want to mount if someone wants to open or
950 * create a file of any type under the mountpoint, wants to
951 * traverse through the mountpoint or wants to open the
952 * mounted directory. Also, autofs may mark negative dentries
953 * as being automount points. These will need the attentions
954 * of the daemon to instantiate them before they can be used.
955 */
956 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
957 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
958 path->dentry->d_inode)
959 return -EISDIR;
960
961 current->total_link_count++;
962 if (current->total_link_count >= 40)
963 return -ELOOP;
964
965 mnt = path->dentry->d_op->d_automount(path);
966 if (IS_ERR(mnt)) {
967 /*
968 * The filesystem is allowed to return -EISDIR here to indicate
969 * it doesn't want to automount. For instance, autofs would do
970 * this so that its userspace daemon can mount on this dentry.
971 *
972 * However, we can only permit this if it's a terminal point in
973 * the path being looked up; if it wasn't then the remainder of
974 * the path is inaccessible and we should say so.
975 */
976 if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
977 return -EREMOTE;
978 return PTR_ERR(mnt);
979 }
980
981 if (!mnt) /* mount collision */
982 return 0;
983
984 if (!*need_mntput) {
985 /* lock_mount() may release path->mnt on error */
986 mntget(path->mnt);
987 *need_mntput = true;
988 }
989 err = finish_automount(mnt, path);
990
991 switch (err) {
992 case -EBUSY:
993 /* Someone else made a mount here whilst we were busy */
994 return 0;
995 case 0:
996 path_put(path);
997 path->mnt = mnt;
998 path->dentry = dget(mnt->mnt_root);
999 return 0;
1000 default:
1001 return err;
1002 }
1003
1004}
1005
1006/*
1007 * Handle a dentry that is managed in some way.
1008 * - Flagged for transit management (autofs)
1009 * - Flagged as mountpoint
1010 * - Flagged as automount point
1011 *
1012 * This may only be called in refwalk mode.
1013 *
1014 * Serialization is taken care of in namespace.c
1015 */
1016static int follow_managed(struct path *path, unsigned flags)
1017{
1018 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
1019 unsigned managed;
1020 bool need_mntput = false;
1021 int ret = 0;
1022
1023 /* Given that we're not holding a lock here, we retain the value in a
1024 * local variable for each dentry as we look at it so that we don't see
1025 * the components of that value change under us */
1026 while (managed = ACCESS_ONCE(path->dentry->d_flags),
1027 managed &= DCACHE_MANAGED_DENTRY,
1028 unlikely(managed != 0)) {
1029 /* Allow the filesystem to manage the transit without i_mutex
1030 * being held. */
1031 if (managed & DCACHE_MANAGE_TRANSIT) {
1032 BUG_ON(!path->dentry->d_op);
1033 BUG_ON(!path->dentry->d_op->d_manage);
1034 ret = path->dentry->d_op->d_manage(path->dentry, false);
1035 if (ret < 0)
1036 break;
1037 }
1038
1039 /* Transit to a mounted filesystem. */
1040 if (managed & DCACHE_MOUNTED) {
1041 struct vfsmount *mounted = lookup_mnt(path);
1042 if (mounted) {
1043 dput(path->dentry);
1044 if (need_mntput)
1045 mntput(path->mnt);
1046 path->mnt = mounted;
1047 path->dentry = dget(mounted->mnt_root);
1048 need_mntput = true;
1049 continue;
1050 }
1051
1052 /* Something is mounted on this dentry in another
1053 * namespace and/or whatever was mounted there in this
1054 * namespace got unmounted before lookup_mnt() could
1055 * get it */
1056 }
1057
1058 /* Handle an automount point */
1059 if (managed & DCACHE_NEED_AUTOMOUNT) {
1060 ret = follow_automount(path, flags, &need_mntput);
1061 if (ret < 0)
1062 break;
1063 continue;
1064 }
1065
1066 /* We didn't change the current path point */
1067 break;
1068 }
1069
1070 if (need_mntput && path->mnt == mnt)
1071 mntput(path->mnt);
1072 if (ret == -EISDIR)
1073 ret = 0;
1074 return ret < 0 ? ret : need_mntput;
1075}
1076
1077int follow_down_one(struct path *path)
1078{
1079 struct vfsmount *mounted;
1080
1081 mounted = lookup_mnt(path);
1082 if (mounted) {
1083 dput(path->dentry);
1084 mntput(path->mnt);
1085 path->mnt = mounted;
1086 path->dentry = dget(mounted->mnt_root);
1087 return 1;
1088 }
1089 return 0;
1090}
1091EXPORT_SYMBOL(follow_down_one);
1092
1093static inline int managed_dentry_rcu(struct dentry *dentry)
1094{
1095 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
1096 dentry->d_op->d_manage(dentry, true) : 0;
1097}
1098
1099/*
1100 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
1101 * we meet a managed dentry that would need blocking.
1102 */
1103static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
1104 struct inode **inode)
1105{
1106 for (;;) {
1107 struct mount *mounted;
1108 /*
1109 * Don't forget we might have a non-mountpoint managed dentry
1110 * that wants to block transit.
1111 */
1112 switch (managed_dentry_rcu(path->dentry)) {
1113 case -ECHILD:
1114 default:
1115 return false;
1116 case -EISDIR:
1117 return true;
1118 case 0:
1119 break;
1120 }
1121
1122 if (!d_mountpoint(path->dentry))
1123 return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
1124
1125 mounted = __lookup_mnt(path->mnt, path->dentry);
1126 if (!mounted)
1127 break;
1128 path->mnt = &mounted->mnt;
1129 path->dentry = mounted->mnt.mnt_root;
1130 nd->flags |= LOOKUP_JUMPED;
1131 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
1132 /*
1133 * Update the inode too. We don't need to re-check the
1134 * dentry sequence number here after this d_inode read,
1135 * because a mount-point is always pinned.
1136 */
1137 *inode = path->dentry->d_inode;
1138 }
1139 return !read_seqretry(&mount_lock, nd->m_seq) &&
1140 !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
1141}
1142
1143static int follow_dotdot_rcu(struct nameidata *nd)
1144{
1145 if (!nd->root.mnt)
1146 set_root_rcu(nd);
1147
1148 while (1) {
1149 if (nd->path.dentry == nd->root.dentry &&
1150 nd->path.mnt == nd->root.mnt) {
1151 break;
1152 }
1153 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1154 struct dentry *old = nd->path.dentry;
1155 struct dentry *parent = old->d_parent;
1156 unsigned seq;
1157
1158 seq = read_seqcount_begin(&parent->d_seq);
1159 if (read_seqcount_retry(&old->d_seq, nd->seq))
1160 goto failed;
1161 nd->path.dentry = parent;
1162 nd->seq = seq;
1163 break;
1164 }
1165 if (!follow_up_rcu(&nd->path))
1166 break;
1167 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1168 }
1169 while (d_mountpoint(nd->path.dentry)) {
1170 struct mount *mounted;
1171 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
1172 if (!mounted)
1173 break;
1174 nd->path.mnt = &mounted->mnt;
1175 nd->path.dentry = mounted->mnt.mnt_root;
1176 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1177 if (read_seqretry(&mount_lock, nd->m_seq))
1178 goto failed;
1179 }
1180 nd->inode = nd->path.dentry->d_inode;
1181 return 0;
1182
1183failed:
1184 nd->flags &= ~LOOKUP_RCU;
1185 if (!(nd->flags & LOOKUP_ROOT))
1186 nd->root.mnt = NULL;
1187 rcu_read_unlock();
1188 return -ECHILD;
1189}
1190
1191/*
1192 * Follow down to the covering mount currently visible to userspace. At each
1193 * point, the filesystem owning that dentry may be queried as to whether the
1194 * caller is permitted to proceed or not.
1195 */
1196int follow_down(struct path *path)
1197{
1198 unsigned managed;
1199 int ret;
1200
1201 while (managed = ACCESS_ONCE(path->dentry->d_flags),
1202 unlikely(managed & DCACHE_MANAGED_DENTRY)) {
1203 /* Allow the filesystem to manage the transit without i_mutex
1204 * being held.
1205 *
1206 * We indicate to the filesystem if someone is trying to mount
1207 * something here. This gives autofs the chance to deny anyone
1208 * other than its daemon the right to mount on its
1209 * superstructure.
1210 *
1211 * The filesystem may sleep at this point.
1212 */
1213 if (managed & DCACHE_MANAGE_TRANSIT) {
1214 BUG_ON(!path->dentry->d_op);
1215 BUG_ON(!path->dentry->d_op->d_manage);
1216 ret = path->dentry->d_op->d_manage(
1217 path->dentry, false);
1218 if (ret < 0)
1219 return ret == -EISDIR ? 0 : ret;
1220 }
1221
1222 /* Transit to a mounted filesystem. */
1223 if (managed & DCACHE_MOUNTED) {
1224 struct vfsmount *mounted = lookup_mnt(path);
1225 if (!mounted)
1226 break;
1227 dput(path->dentry);
1228 mntput(path->mnt);
1229 path->mnt = mounted;
1230 path->dentry = dget(mounted->mnt_root);
1231 continue;
1232 }
1233
1234 /* Don't handle automount points here */
1235 break;
1236 }
1237 return 0;
1238}
1239EXPORT_SYMBOL(follow_down);
1240
1241/*
1242 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
1243 */
1244static void follow_mount(struct path *path)
1245{
1246 while (d_mountpoint(path->dentry)) {
1247 struct vfsmount *mounted = lookup_mnt(path);
1248 if (!mounted)
1249 break;
1250 dput(path->dentry);
1251 mntput(path->mnt);
1252 path->mnt = mounted;
1253 path->dentry = dget(mounted->mnt_root);
1254 }
1255}
1256
1257static void follow_dotdot(struct nameidata *nd)
1258{
1259 if (!nd->root.mnt)
1260 set_root(nd);
1261
1262 while(1) {
1263 struct dentry *old = nd->path.dentry;
1264
1265 if (nd->path.dentry == nd->root.dentry &&
1266 nd->path.mnt == nd->root.mnt) {
1267 break;
1268 }
1269 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1270 /* rare case of legitimate dget_parent()... */
1271 nd->path.dentry = dget_parent(nd->path.dentry);
1272 dput(old);
1273 break;
1274 }
1275 if (!follow_up(&nd->path))
1276 break;
1277 }
1278 follow_mount(&nd->path);
1279 nd->inode = nd->path.dentry->d_inode;
1280}
1281
1282/*
1283 * This looks up the name in dcache, possibly revalidates the old dentry and
1284 * allocates a new one if not found or not valid. Through the need_lookup
1285 * argument it returns whether i_op->lookup is still necessary.
1286 *
1287 * dir->d_inode->i_mutex must be held
1288 */
1289static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
1290 unsigned int flags, bool *need_lookup)
1291{
1292 struct dentry *dentry;
1293 int error;
1294
1295 *need_lookup = false;
1296 dentry = d_lookup(dir, name);
1297 if (dentry) {
1298 if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
1299 error = d_revalidate(dentry, flags);
1300 if (unlikely(error <= 0)) {
1301 if (error < 0) {
1302 dput(dentry);
1303 return ERR_PTR(error);
1304 } else if (!d_invalidate(dentry)) {
1305 dput(dentry);
1306 dentry = NULL;
1307 }
1308 }
1309 }
1310 }
1311
1312 if (!dentry) {
1313 dentry = d_alloc(dir, name);
1314 if (unlikely(!dentry))
1315 return ERR_PTR(-ENOMEM);
1316
1317 *need_lookup = true;
1318 }
1319 return dentry;
1320}
1321
1322/*
1323 * Call i_op->lookup on the dentry. The dentry must be negative and
1324 * unhashed.
1325 *
1326 * dir->d_inode->i_mutex must be held
1327 */
1328static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1329 unsigned int flags)
1330{
1331 struct dentry *old;
1332
1333 /* Don't create child dentry for a dead directory. */
1334 if (unlikely(IS_DEADDIR(dir))) {
1335 dput(dentry);
1336 return ERR_PTR(-ENOENT);
1337 }
1338
1339 old = dir->i_op->lookup(dir, dentry, flags);
1340 if (unlikely(old)) {
1341 dput(dentry);
1342 dentry = old;
1343 }
1344 return dentry;
1345}
1346
1347static struct dentry *__lookup_hash(struct qstr *name,
1348 struct dentry *base, unsigned int flags)
1349{
1350 bool need_lookup;
1351 struct dentry *dentry;
1352
1353 dentry = lookup_dcache(name, base, flags, &need_lookup);
1354 if (!need_lookup)
1355 return dentry;
1356
1357 return lookup_real(base->d_inode, dentry, flags);
1358}
1359
1360/*
1361 * It's more convoluted than I'd like it to be, but... it's still fairly
1362 * small and for now I'd prefer to have the fast path as straight as possible.
1363 * It _is_ time-critical.
1364 */
1365static int lookup_fast(struct nameidata *nd,
1366 struct path *path, struct inode **inode)
1367{
1368 struct vfsmount *mnt = nd->path.mnt;
1369 struct dentry *dentry, *parent = nd->path.dentry;
1370 int need_reval = 1;
1371 int status = 1;
1372 int err;
1373
1374 /*
1375 * Rename seqlock is not required here because in the off chance
1376 * of a false negative due to a concurrent rename, we're going to
1377 * do the non-racy lookup, below.
1378 */
1379 if (nd->flags & LOOKUP_RCU) {
1380 unsigned seq;
1381 dentry = __d_lookup_rcu(parent, &nd->last, &seq);
1382 if (!dentry)
1383 goto unlazy;
1384
1385 /*
1386 * This sequence count validates that the inode matches
1387 * the dentry name information from lookup.
1388 */
1389 *inode = dentry->d_inode;
1390 if (read_seqcount_retry(&dentry->d_seq, seq))
1391 return -ECHILD;
1392
1393 /*
1394 * This sequence count validates that the parent had no
1395 * changes while we did the lookup of the dentry above.
1396 *
1397 * The memory barrier in read_seqcount_begin of child is
1398 * enough, we can use __read_seqcount_retry here.
1399 */
1400 if (__read_seqcount_retry(&parent->d_seq, nd->seq))
1401 return -ECHILD;
1402 nd->seq = seq;
1403
1404 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1405 status = d_revalidate(dentry, nd->flags);
1406 if (unlikely(status <= 0)) {
1407 if (status != -ECHILD)
1408 need_reval = 0;
1409 goto unlazy;
1410 }
1411 }
1412 path->mnt = mnt;
1413 path->dentry = dentry;
1414 if (likely(__follow_mount_rcu(nd, path, inode)))
1415 return 0;
1416unlazy:
1417 if (unlazy_walk(nd, dentry))
1418 return -ECHILD;
1419 } else {
1420 dentry = __d_lookup(parent, &nd->last);
1421 }
1422
1423 if (unlikely(!dentry))
1424 goto need_lookup;
1425
1426 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
1427 status = d_revalidate(dentry, nd->flags);
1428 if (unlikely(status <= 0)) {
1429 if (status < 0) {
1430 dput(dentry);
1431 return status;
1432 }
1433 if (!d_invalidate(dentry)) {
1434 dput(dentry);
1435 goto need_lookup;
1436 }
1437 }
1438
1439 path->mnt = mnt;
1440 path->dentry = dentry;
1441 err = follow_managed(path, nd->flags);
1442 if (unlikely(err < 0)) {
1443 path_put_conditional(path, nd);
1444 return err;
1445 }
1446 if (err)
1447 nd->flags |= LOOKUP_JUMPED;
1448 *inode = path->dentry->d_inode;
1449 return 0;
1450
1451need_lookup:
1452 return 1;
1453}
1454
1455/* Fast lookup failed, do it the slow way */
1456static int lookup_slow(struct nameidata *nd, struct path *path)
1457{
1458 struct dentry *dentry, *parent;
1459 int err;
1460
1461 parent = nd->path.dentry;
1462 BUG_ON(nd->inode != parent->d_inode);
1463
1464 mutex_lock(&parent->d_inode->i_mutex);
1465 dentry = __lookup_hash(&nd->last, parent, nd->flags);
1466 mutex_unlock(&parent->d_inode->i_mutex);
1467 if (IS_ERR(dentry))
1468 return PTR_ERR(dentry);
1469 path->mnt = nd->path.mnt;
1470 path->dentry = dentry;
1471 err = follow_managed(path, nd->flags);
1472 if (unlikely(err < 0)) {
1473 path_put_conditional(path, nd);
1474 return err;
1475 }
1476 if (err)
1477 nd->flags |= LOOKUP_JUMPED;
1478 return 0;
1479}
1480
1481static inline int may_lookup(struct nameidata *nd)
1482{
1483 if (nd->flags & LOOKUP_RCU) {
1484 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1485 if (err != -ECHILD)
1486 return err;
1487 if (unlazy_walk(nd, NULL))
1488 return -ECHILD;
1489 }
1490 return inode_permission(nd->inode, MAY_EXEC);
1491}
1492
1493static inline int handle_dots(struct nameidata *nd, int type)
1494{
1495 if (type == LAST_DOTDOT) {
1496 if (nd->flags & LOOKUP_RCU) {
1497 if (follow_dotdot_rcu(nd))
1498 return -ECHILD;
1499 } else
1500 follow_dotdot(nd);
1501 }
1502 return 0;
1503}
1504
1505static void terminate_walk(struct nameidata *nd)
1506{
1507 if (!(nd->flags & LOOKUP_RCU)) {
1508 path_put(&nd->path);
1509 } else {
1510 nd->flags &= ~LOOKUP_RCU;
1511 if (!(nd->flags & LOOKUP_ROOT))
1512 nd->root.mnt = NULL;
1513 rcu_read_unlock();
1514 }
1515}
1516
1517/*
1518 * Do we need to follow links? We _really_ want to be able
1519 * to do this check without having to look at inode->i_op,
1520 * so we keep a cache of "no, this doesn't need follow_link"
1521 * for the common case.
1522 */
1523static inline int should_follow_link(struct dentry *dentry, int follow)
1524{
1525 return unlikely(d_is_symlink(dentry)) ? follow : 0;
1526}
1527
1528static inline int walk_component(struct nameidata *nd, struct path *path,
1529 int follow)
1530{
1531 struct inode *inode;
1532 int err;
1533 /*
1534 * "." and ".." are special - ".." especially so because it has
1535 * to be able to know about the current root directory and
1536 * parent relationships.
1537 */
1538 if (unlikely(nd->last_type != LAST_NORM))
1539 return handle_dots(nd, nd->last_type);
1540 err = lookup_fast(nd, path, &inode);
1541 if (unlikely(err)) {
1542 if (err < 0)
1543 goto out_err;
1544
1545 err = lookup_slow(nd, path);
1546 if (err < 0)
1547 goto out_err;
1548
1549 inode = path->dentry->d_inode;
1550 }
1551 err = -ENOENT;
1552 if (!inode || d_is_negative(path->dentry))
1553 goto out_path_put;
1554
1555 if (should_follow_link(path->dentry, follow)) {
1556 if (nd->flags & LOOKUP_RCU) {
1557 if (unlikely(unlazy_walk(nd, path->dentry))) {
1558 err = -ECHILD;
1559 goto out_err;
1560 }
1561 }
1562 BUG_ON(inode != path->dentry->d_inode);
1563 return 1;
1564 }
1565 path_to_nameidata(path, nd);
1566 nd->inode = inode;
1567 return 0;
1568
1569out_path_put:
1570 path_to_nameidata(path, nd);
1571out_err:
1572 terminate_walk(nd);
1573 return err;
1574}
1575
1576/*
1577 * This limits recursive symlink follows to 8, while
1578 * limiting consecutive symlinks to 40.
1579 *
1580 * Without that kind of total limit, nasty chains of consecutive
1581 * symlinks can cause almost arbitrarily long lookups.
1582 */
1583static inline int nested_symlink(struct path *path, struct nameidata *nd)
1584{
1585 int res;
1586
1587 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
1588 path_put_conditional(path, nd);
1589 path_put(&nd->path);
1590 return -ELOOP;
1591 }
1592 BUG_ON(nd->depth >= MAX_NESTED_LINKS);
1593
1594 nd->depth++;
1595 current->link_count++;
1596
1597 do {
1598 struct path link = *path;
1599 void *cookie;
1600
1601 res = follow_link(&link, nd, &cookie);
1602 if (res)
1603 break;
1604 res = walk_component(nd, path, LOOKUP_FOLLOW);
1605 put_link(nd, &link, cookie);
1606 } while (res > 0);
1607
1608 current->link_count--;
1609 nd->depth--;
1610 return res;
1611}
1612
1613/*
1614 * We can do the critical dentry name comparison and hashing
1615 * operations one word at a time, but we are limited to:
1616 *
1617 * - Architectures with fast unaligned word accesses. We could
1618 * do a "get_unaligned()" if this helps and is sufficiently
1619 * fast.
1620 *
1621 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
1622 * do not trap on the (extremely unlikely) case of a page
1623 * crossing operation).
1624 *
1625 * - Furthermore, we need an efficient 64-bit compile for the
1626 * 64-bit case in order to generate the "number of bytes in
1627 * the final mask". Again, that could be replaced with an
1628 * efficient population count instruction or similar.
1629 */
1630#ifdef CONFIG_DCACHE_WORD_ACCESS
1631
1632#include <asm/word-at-a-time.h>
1633
1634#ifdef CONFIG_64BIT
1635
1636static inline unsigned int fold_hash(unsigned long hash)
1637{
1638 hash += hash >> (8*sizeof(int));
1639 return hash;
1640}
1641
1642#else /* 32-bit case */
1643
1644#define fold_hash(x) (x)
1645
1646#endif
1647
1648unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1649{
1650 unsigned long a, mask;
1651 unsigned long hash = 0;
1652
1653 for (;;) {
1654 a = load_unaligned_zeropad(name);
1655 if (len < sizeof(unsigned long))
1656 break;
1657 hash += a;
1658 hash *= 9;
1659 name += sizeof(unsigned long);
1660 len -= sizeof(unsigned long);
1661 if (!len)
1662 goto done;
1663 }
1664 mask = bytemask_from_count(len);
1665 hash += mask & a;
1666done:
1667 return fold_hash(hash);
1668}
1669EXPORT_SYMBOL(full_name_hash);
1670
1671/*
1672 * Calculate the length and hash of the path component, and
1673 * return the length of the component.
1674 */
1675static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1676{
1677 unsigned long a, b, adata, bdata, mask, hash, len;
1678 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1679
1680 hash = a = 0;
1681 len = -sizeof(unsigned long);
1682 do {
1683 hash = (hash + a) * 9;
1684 len += sizeof(unsigned long);
1685 a = load_unaligned_zeropad(name+len);
1686 b = a ^ REPEAT_BYTE('/');
1687 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
1688
1689 adata = prep_zero_mask(a, adata, &constants);
1690 bdata = prep_zero_mask(b, bdata, &constants);
1691
1692 mask = create_zero_mask(adata | bdata);
1693
1694 hash += a & zero_bytemask(mask);
1695 *hashp = fold_hash(hash);
1696
1697 return len + find_zero(mask);
1698}
1699
1700#else
1701
1702unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1703{
1704 unsigned long hash = init_name_hash();
1705 while (len--)
1706 hash = partial_name_hash(*name++, hash);
1707 return end_name_hash(hash);
1708}
1709EXPORT_SYMBOL(full_name_hash);
1710
1711/*
1712 * We know there's a real path component here of at least
1713 * one character.
1714 */
1715static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1716{
1717 unsigned long hash = init_name_hash();
1718 unsigned long len = 0, c;
1719
1720 c = (unsigned char)*name;
1721 do {
1722 len++;
1723 hash = partial_name_hash(c, hash);
1724 c = (unsigned char)name[len];
1725 } while (c && c != '/');
1726 *hashp = end_name_hash(hash);
1727 return len;
1728}
1729
1730#endif
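/*
 * Usage note (added for illustration): both hash_name() variants stop
 * at the first '/' or NUL, so for the input "conf/fstab" they return 4
 * and *hashp holds the hash of "conf".  link_path_walk() below relies
 * on that returned length to step through the pathname one component
 * at a time.
 */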
1731
1732/*
1733 * Name resolution.
1734 * This is the basic name resolution function, turning a pathname into
1735 * the final dentry. We expect 'base' to be positive and a directory.
1736 *
1737 * Returns 0 and nd will have valid dentry and mnt on success.
1738 * Returns error and drops reference to input namei data on failure.
1739 */
1740static int link_path_walk(const char *name, struct nameidata *nd)
1741{
1742 struct path next;
1743 int err;
1744
1745 while (*name=='/')
1746 name++;
1747 if (!*name)
1748 return 0;
1749
1750 /* At this point we know we have a real path component. */
1751 for(;;) {
1752 struct qstr this;
1753 long len;
1754 int type;
1755
1756 err = may_lookup(nd);
1757 if (err)
1758 break;
1759
1760 len = hash_name(name, &this.hash);
1761 this.name = name;
1762 this.len = len;
1763
1764 type = LAST_NORM;
1765 if (name[0] == '.') switch (len) {
1766 case 2:
1767 if (name[1] == '.') {
1768 type = LAST_DOTDOT;
1769 nd->flags |= LOOKUP_JUMPED;
1770 }
1771 break;
1772 case 1:
1773 type = LAST_DOT;
1774 }
1775 if (likely(type == LAST_NORM)) {
1776 struct dentry *parent = nd->path.dentry;
1777 nd->flags &= ~LOOKUP_JUMPED;
1778 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
1779 err = parent->d_op->d_hash(parent, &this);
1780 if (err < 0)
1781 break;
1782 }
1783 }
1784
1785 nd->last = this;
1786 nd->last_type = type;
1787
1788 if (!name[len])
1789 return 0;
1790 /*
1791 * If it wasn't NUL, we know it was '/'. Skip that
1792 * slash, and continue until no more slashes.
1793 */
1794 do {
1795 len++;
1796 } while (unlikely(name[len] == '/'));
1797 if (!name[len])
1798 return 0;
1799
1800 name += len;
1801
1802 err = walk_component(nd, &next, LOOKUP_FOLLOW);
1803 if (err < 0)
1804 return err;
1805
1806 if (err) {
1807 err = nested_symlink(&next, nd);
1808 if (err)
1809 return err;
1810 }
1811 if (!d_can_lookup(nd->path.dentry)) {
1812 err = -ENOTDIR;
1813 break;
1814 }
1815 }
1816 terminate_walk(nd);
1817 return err;
1818}
1819
1820static int path_init(int dfd, const char *name, unsigned int flags,
1821 struct nameidata *nd, struct file **fp)
1822{
1823 int retval = 0;
1824
1825 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1826 nd->flags = flags | LOOKUP_JUMPED;
1827 nd->depth = 0;
1828 if (flags & LOOKUP_ROOT) {
1829 struct dentry *root = nd->root.dentry;
1830 struct inode *inode = root->d_inode;
1831 if (*name) {
1832 if (!d_can_lookup(root))
1833 return -ENOTDIR;
1834 retval = inode_permission(inode, MAY_EXEC);
1835 if (retval)
1836 return retval;
1837 }
1838 nd->path = nd->root;
1839 nd->inode = inode;
1840 if (flags & LOOKUP_RCU) {
1841 rcu_read_lock();
1842 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1843 nd->m_seq = read_seqbegin(&mount_lock);
1844 } else {
1845 path_get(&nd->path);
1846 }
1847 return 0;
1848 }
1849
1850 nd->root.mnt = NULL;
1851
1852 nd->m_seq = read_seqbegin(&mount_lock);
1853 if (*name=='/') {
1854 if (flags & LOOKUP_RCU) {
1855 rcu_read_lock();
1856 nd->seq = set_root_rcu(nd);
1857 } else {
1858 set_root(nd);
1859 path_get(&nd->root);
1860 }
1861 nd->path = nd->root;
1862 } else if (dfd == AT_FDCWD) {
1863 if (flags & LOOKUP_RCU) {
1864 struct fs_struct *fs = current->fs;
1865 unsigned seq;
1866
1867 rcu_read_lock();
1868
1869 do {
1870 seq = read_seqcount_begin(&fs->seq);
1871 nd->path = fs->pwd;
1872 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1873 } while (read_seqcount_retry(&fs->seq, seq));
1874 } else {
1875 get_fs_pwd(current->fs, &nd->path);
1876 }
1877 } else {
1878 /* Caller must check execute permissions on the starting path component */
1879 struct fd f = fdget_raw(dfd);
1880 struct dentry *dentry;
1881
1882 if (!f.file)
1883 return -EBADF;
1884
1885 dentry = f.file->f_path.dentry;
1886
1887 if (*name) {
1888 if (!d_can_lookup(dentry)) {
1889 fdput(f);
1890 return -ENOTDIR;
1891 }
1892 }
1893
1894 nd->path = f.file->f_path;
1895 if (flags & LOOKUP_RCU) {
1896 if (f.flags & FDPUT_FPUT)
1897 *fp = f.file;
1898 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1899 rcu_read_lock();
1900 } else {
1901 path_get(&nd->path);
1902 fdput(f);
1903 }
1904 }
1905
1906 nd->inode = nd->path.dentry->d_inode;
1907 return 0;
1908}
1909
1910static inline int lookup_last(struct nameidata *nd, struct path *path)
1911{
1912 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
1913 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
1914
1915 nd->flags &= ~LOOKUP_PARENT;
1916 return walk_component(nd, path, nd->flags & LOOKUP_FOLLOW);
1917}
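/*
 * Example (added for illustration): lstat("/tmp/link") reports the
 * symlink itself, but lstat("/tmp/link/") follows it: the trailing
 * slash leaves nd->last.name[nd->last.len] non-NUL, so the test above
 * turns on LOOKUP_FOLLOW | LOOKUP_DIRECTORY, matching the "trailing
 * slashes - follow" rule documented at the top of this file.
 */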
1918
1919/* Returns 0 and nd will be valid on success; returns an error otherwise. */
1920static int path_lookupat(int dfd, const char *name,
1921 unsigned int flags, struct nameidata *nd)
1922{
1923 struct file *base = NULL;
1924 struct path path;
1925 int err;
1926
1927 /*
1928 * Path walking is largely split up into 2 different synchronisation
1929 * schemes, rcu-walk and ref-walk (explained in
1930 * Documentation/filesystems/path-lookup.txt). These share much of the
1931 * path walk code, but some things particularly setup, cleanup, and
1932 * following mounts are sufficiently divergent that functions are
1933 * duplicated. Typically there is a function foo(), and its RCU
1934 * analogue, foo_rcu().
1935 *
1936 * -ECHILD is the error number of choice (just to avoid clashes) that
1937 * is returned if some aspect of an rcu-walk fails. Such an error must
1938 * be handled by restarting a traditional ref-walk (which will always
1939 * be able to complete).
1940 */
1941 err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
1942
1943 if (unlikely(err))
1944 return err;
1945
1946 current->total_link_count = 0;
1947 err = link_path_walk(name, nd);
1948
1949 if (!err && !(flags & LOOKUP_PARENT)) {
1950 err = lookup_last(nd, &path);
1951 while (err > 0) {
1952 void *cookie;
1953 struct path link = path;
1954 err = may_follow_link(&link, nd);
1955 if (unlikely(err))
1956 break;
1957 nd->flags |= LOOKUP_PARENT;
1958 err = follow_link(&link, nd, &cookie);
1959 if (err)
1960 break;
1961 err = lookup_last(nd, &path);
1962 put_link(nd, &link, cookie);
1963 }
1964 }
1965
1966 if (!err)
1967 err = complete_walk(nd);
1968
1969 if (!err && nd->flags & LOOKUP_DIRECTORY) {
1970 if (!d_can_lookup(nd->path.dentry)) {
1971 path_put(&nd->path);
1972 err = -ENOTDIR;
1973 }
1974 }
1975
1976 if (base)
1977 fput(base);
1978
1979 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
1980 path_put(&nd->root);
1981 nd->root.mnt = NULL;
1982 }
1983 return err;
1984}
1985
1986static int filename_lookup(int dfd, struct filename *name,
1987 unsigned int flags, struct nameidata *nd)
1988{
1989 int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd);
1990 if (unlikely(retval == -ECHILD))
1991 retval = path_lookupat(dfd, name->name, flags, nd);
1992 if (unlikely(retval == -ESTALE))
1993 retval = path_lookupat(dfd, name->name,
1994 flags | LOOKUP_REVAL, nd);
1995
1996 if (likely(!retval))
1997 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
1998 return retval;
1999}
2000
2001static int do_path_lookup(int dfd, const char *name,
2002 unsigned int flags, struct nameidata *nd)
2003{
2004 struct filename filename = { .name = name };
2005
2006 return filename_lookup(dfd, &filename, flags, nd);
2007}
2008
2009/* does lookup, returns the object with parent locked */
2010struct dentry *kern_path_locked(const char *name, struct path *path)
2011{
2012 struct nameidata nd;
2013 struct dentry *d;
2014 int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd);
2015 if (err)
2016 return ERR_PTR(err);
2017 if (nd.last_type != LAST_NORM) {
2018 path_put(&nd.path);
2019 return ERR_PTR(-EINVAL);
2020 }
2021 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2022 d = __lookup_hash(&nd.last, nd.path.dentry, 0);
2023 if (IS_ERR(d)) {
2024 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2025 path_put(&nd.path);
2026 return d;
2027 }
2028 *path = nd.path;
2029 return d;
2030}
2031
2032int kern_path(const char *name, unsigned int flags, struct path *path)
2033{
2034 struct nameidata nd;
2035 int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
2036 if (!res)
2037 *path = nd.path;
2038 return res;
2039}
2040EXPORT_SYMBOL(kern_path);
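
/*
 * Illustrative sketch only (this caller does not exist anywhere; the function
 * name and pathname are invented for the example): kern_path() resolves a
 * kernel-space pathname into a struct path, and the caller is responsible for
 * dropping the reference with path_put().
 *
 *	static int example_open_conf(void)
 *	{
 *		struct path p;
 *		int err = kern_path("/etc/example.conf", LOOKUP_FOLLOW, &p);
 *
 *		if (err)
 *			return err;
 *		// ... use p.dentry / p.mnt ...
 *		path_put(&p);	// balance the reference taken by kern_path()
 *		return 0;
 *	}
 */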
2041
2042/**
2043 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
2044 * @dentry: pointer to dentry of the base directory
2045 * @mnt: pointer to vfs mount of the base directory
2046 * @name: pointer to file name
2047 * @flags: lookup flags
2048 * @path: pointer to struct path to fill
2049 */
2050int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
2051 const char *name, unsigned int flags,
2052 struct path *path)
2053{
2054 struct nameidata nd;
2055 int err;
2056 nd.root.dentry = dentry;
2057 nd.root.mnt = mnt;
2058 BUG_ON(flags & LOOKUP_PARENT);
2059 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
2060 err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
2061 if (!err)
2062 *path = nd.path;
2063 return err;
2064}
2065EXPORT_SYMBOL(vfs_path_lookup);
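
/*
 * Illustrative sketch (all identifiers below are hypothetical): unlike
 * kern_path(), vfs_path_lookup() resolves @name relative to an explicit
 * <mnt, dentry> pair rather than the caller's cwd and root, which is what
 * in-kernel servers operating on their own base directory want.
 *
 *	struct path child;
 *	int err = vfs_path_lookup(base_dentry, base_mnt, "sub/dir/file",
 *				  LOOKUP_FOLLOW, &child);
 *	if (!err)
 *		path_put(&child);	// drop the reference when done
 */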
2066
2067/*
2068 * Restricted form of lookup. Doesn't follow links, single-component only,
2069 * needs parent already locked. Doesn't follow mounts.
2070 * SMP-safe.
2071 */
2072static struct dentry *lookup_hash(struct nameidata *nd)
2073{
2074 return __lookup_hash(&nd->last, nd->path.dentry, nd->flags);
2075}
2076
2077/**
2078 * lookup_one_len - filesystem helper to lookup single pathname component
2079 * @name: pathname component to lookup
2080 * @base: base directory to lookup from
2081 * @len: maximum length of @name
2082 *
2083 * Note that this routine is purely a helper for filesystem usage and should
2084 * not be called by generic code. Also note that by using this function the
2085 * nameidata argument is passed to the filesystem methods and a filesystem
2086 * using this helper needs to be prepared for that.
2087 */
2088struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
2089{
2090 struct qstr this;
2091 unsigned int c;
2092 int err;
2093
2094 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
2095
2096 this.name = name;
2097 this.len = len;
2098 this.hash = full_name_hash(name, len);
2099 if (!len)
2100 return ERR_PTR(-EACCES);
2101
2102 if (unlikely(name[0] == '.')) {
2103 if (len < 2 || (len == 2 && name[1] == '.'))
2104 return ERR_PTR(-EACCES);
2105 }
2106
2107 while (len--) {
2108 c = *(const unsigned char *)name++;
2109 if (c == '/' || c == '\0')
2110 return ERR_PTR(-EACCES);
2111 }
2112 /*
2113 * See if the low-level filesystem might want
2114 * to use its own hash..
2115 */
2116 if (base->d_flags & DCACHE_OP_HASH) {
2117 int err = base->d_op->d_hash(base, &this);
2118 if (err < 0)
2119 return ERR_PTR(err);
2120 }
2121
2122 err = inode_permission(base->d_inode, MAY_EXEC);
2123 if (err)
2124 return ERR_PTR(err);
2125
2126 return __lookup_hash(&this, base, 0);
2127}
2128EXPORT_SYMBOL(lookup_one_len);
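
/*
 * Illustrative sketch for filesystem code (identifiers are hypothetical):
 * the parent's i_mutex must already be held, and the returned dentry -
 * which may be negative - has to be dput() when no longer needed.
 *
 *	mutex_lock(&dir->d_inode->i_mutex);
 *	child = lookup_one_len("child", dir, strlen("child"));
 *	mutex_unlock(&dir->d_inode->i_mutex);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	// child->d_inode may be NULL here (negative dentry)
 *	dput(child);
 */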
2129
2130int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
2131 struct path *path, int *empty)
2132{
2133 struct nameidata nd;
2134 struct filename *tmp = getname_flags(name, flags, empty);
2135 int err = PTR_ERR(tmp);
2136 if (!IS_ERR(tmp)) {
2137
2138 BUG_ON(flags & LOOKUP_PARENT);
2139
2140 err = filename_lookup(dfd, tmp, flags, &nd);
2141 putname(tmp);
2142 if (!err)
2143 *path = nd.path;
2144 }
2145 return err;
2146}
2147
2148int user_path_at(int dfd, const char __user *name, unsigned flags,
2149 struct path *path)
2150{
2151 return user_path_at_empty(dfd, name, flags, path, NULL);
2152}
2153EXPORT_SYMBOL(user_path_at);
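
/*
 * Illustrative sketch of a syscall-style caller (the syscall below is made
 * up for the example): resolve a pathname supplied by userland, use the
 * result, then drop the reference.
 *
 *	SYSCALL_DEFINE1(example_inspect, const char __user *, filename)
 *	{
 *		struct path path;
 *		int error = user_path_at(AT_FDCWD, filename,
 *					 LOOKUP_FOLLOW, &path);
 *
 *		if (error)
 *			return error;
 *		// ... look at path.dentry->d_inode ...
 *		path_put(&path);
 *		return 0;
 *	}
 */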
2154
2155/*
2156 * NB: most callers don't do anything directly with the reference to the
2157 * struct filename, but the nd->last pointer points into the name string
2158 * allocated by getname. So we must hold the reference to it until all
2159 * path-walking is complete.
2160 */
2161static struct filename *
2162user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
2163 unsigned int flags)
2164{
2165 struct filename *s = getname(path);
2166 int error;
2167
2168 /* only LOOKUP_REVAL is allowed in extra flags */
2169 flags &= LOOKUP_REVAL;
2170
2171 if (IS_ERR(s))
2172 return s;
2173
2174 error = filename_lookup(dfd, s, flags | LOOKUP_PARENT, nd);
2175 if (error) {
2176 putname(s);
2177 return ERR_PTR(error);
2178 }
2179
2180 return s;
2181}
2182
2183/**
2184 * mountpoint_last - look up last component for umount
2185 * @nd: pathwalk nameidata - currently pointing at parent directory of "last"
2186 * @path: pointer to container for result
2187 *
2188 * This is a special lookup_last function just for umount. In this case, we
2189 * need to resolve the path without doing any revalidation.
2190 *
2191 * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
2192 * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
2193 * in almost all cases, this lookup will be served out of the dcache. The only
2194 * cases where it won't are if nd->last refers to a symlink or the path is
2195 * bogus and it doesn't exist.
2196 *
2197 * Returns:
2198 * -error: if there was an error during lookup. This includes -ENOENT if the
2199 * lookup found a negative dentry. The nd->path reference will also be
2200 * put in this case.
2201 *
2202 * 0: if we successfully resolved nd->path and found it not to be a
2203 * symlink that needs to be followed. "path" will also be populated.
2204 * The nd->path reference will also be put.
2205 *
2206 * 1: if we successfully resolved nd->last and found it to be a symlink
2207 * that needs to be followed. "path" will be populated with the path
2208 * to the link, and nd->path will *not* be put.
2209 */
2210static int
2211mountpoint_last(struct nameidata *nd, struct path *path)
2212{
2213 int error = 0;
2214 struct dentry *dentry;
2215 struct dentry *dir = nd->path.dentry;
2216
2217 /* If we're in rcuwalk, drop out of it to handle last component */
2218 if (nd->flags & LOOKUP_RCU) {
2219 if (unlazy_walk(nd, NULL)) {
2220 error = -ECHILD;
2221 goto out;
2222 }
2223 }
2224
2225 nd->flags &= ~LOOKUP_PARENT;
2226
2227 if (unlikely(nd->last_type != LAST_NORM)) {
2228 error = handle_dots(nd, nd->last_type);
2229 if (error)
2230 goto out;
2231 dentry = dget(nd->path.dentry);
2232 goto done;
2233 }
2234
2235 mutex_lock(&dir->d_inode->i_mutex);
2236 dentry = d_lookup(dir, &nd->last);
2237 if (!dentry) {
2238 /*
2239 * No cached dentry. Mounted dentries are pinned in the cache,
2240 * so that means that this dentry is probably a symlink or the
2241 * path doesn't actually point to a mounted dentry.
2242 */
2243 dentry = d_alloc(dir, &nd->last);
2244 if (!dentry) {
2245 error = -ENOMEM;
2246 mutex_unlock(&dir->d_inode->i_mutex);
2247 goto out;
2248 }
2249 dentry = lookup_real(dir->d_inode, dentry, nd->flags);
2250 error = PTR_ERR(dentry);
2251 if (IS_ERR(dentry)) {
2252 mutex_unlock(&dir->d_inode->i_mutex);
2253 goto out;
2254 }
2255 }
2256 mutex_unlock(&dir->d_inode->i_mutex);
2257
2258done:
2259 if (!dentry->d_inode || d_is_negative(dentry)) {
2260 error = -ENOENT;
2261 dput(dentry);
2262 goto out;
2263 }
2264 path->dentry = dentry;
2265 path->mnt = nd->path.mnt;
2266 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
2267 return 1;
2268 mntget(path->mnt);
2269 follow_mount(path);
2270 error = 0;
2271out:
2272 terminate_walk(nd);
2273 return error;
2274}
2275
2276/**
2277 * path_mountpoint - look up a path to be umounted
2278 * @dfd: directory file descriptor to start walk from
2279 * @name: full pathname to walk
2280 * @path: pointer to container for result
2281 * @flags: lookup flags
2282 *
2283 * Look up the given name, but don't attempt to revalidate the last component.
2284 * Returns 0 and "path" will be valid on success; returns error otherwise.
2285 */
2286static int
2287path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
2288{
2289 struct file *base = NULL;
2290 struct nameidata nd;
2291 int err;
2292
2293 err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
2294 if (unlikely(err))
2295 return err;
2296
2297 current->total_link_count = 0;
2298 err = link_path_walk(name, &nd);
2299 if (err)
2300 goto out;
2301
2302 err = mountpoint_last(&nd, path);
2303 while (err > 0) {
2304 void *cookie;
2305 struct path link = *path;
2306 err = may_follow_link(&link, &nd);
2307 if (unlikely(err))
2308 break;
2309 nd.flags |= LOOKUP_PARENT;
2310 err = follow_link(&link, &nd, &cookie);
2311 if (err)
2312 break;
2313 err = mountpoint_last(&nd, path);
2314 put_link(&nd, &link, cookie);
2315 }
2316out:
2317 if (base)
2318 fput(base);
2319
2320 if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
2321 path_put(&nd.root);
2322
2323 return err;
2324}
2325
2326static int
2327filename_mountpoint(int dfd, struct filename *s, struct path *path,
2328 unsigned int flags)
2329{
2330 int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
2331 if (unlikely(error == -ECHILD))
2332 error = path_mountpoint(dfd, s->name, path, flags);
2333 if (unlikely(error == -ESTALE))
2334 error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
2335 if (likely(!error))
2336 audit_inode(s, path->dentry, 0);
2337 return error;
2338}
2339
2340/**
2341 * user_path_mountpoint_at - lookup a path from userland in order to umount it
2342 * @dfd: directory file descriptor
2343 * @name: pathname from userland
2344 * @flags: lookup flags
2345 * @path: pointer to container to hold result
2346 *
2347 * A umount is a special case for path walking. We're not actually interested
2348 * in the inode in this situation, and ESTALE errors can be a problem. We
2349 * simply want to track down the dentry and vfsmount attached at the mountpoint
2350 * and avoid revalidating the last component.
2351 *
2352 * Returns 0 and populates "path" on success.
2353 */
2354int
2355user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
2356 struct path *path)
2357{
2358 struct filename *s = getname(name);
2359 int error;
2360 if (IS_ERR(s))
2361 return PTR_ERR(s);
2362 error = filename_mountpoint(dfd, s, path, flags);
2363 putname(s);
2364 return error;
2365}
2366
2367int
2368kern_path_mountpoint(int dfd, const char *name, struct path *path,
2369 unsigned int flags)
2370{
2371 struct filename s = {.name = name};
2372 return filename_mountpoint(dfd, &s, path, flags);
2373}
2374EXPORT_SYMBOL(kern_path_mountpoint);
2375
2376/*
2377 * It's inline, so the penalty for filesystems that don't use the sticky bit is
2378 * minimal.
2379 */
2380static inline int check_sticky(struct inode *dir, struct inode *inode)
2381{
2382 kuid_t fsuid = current_fsuid();
2383
2384 if (!(dir->i_mode & S_ISVTX))
2385 return 0;
2386 if (uid_eq(inode->i_uid, fsuid))
2387 return 0;
2388 if (uid_eq(dir->i_uid, fsuid))
2389 return 0;
2390 return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
2391}
2392
2393/*
2394 * Check whether we can remove a link victim from directory dir, check
2395 * whether the type of victim is right.
2396 * 1. We can't do it if dir is read-only (done in permission())
2397 * 2. We should have write and exec permissions on dir
2398 * 3. We can't remove anything from append-only dir
2399 * 4. We can't do anything with immutable dir (done in permission())
2400 * 5. If the sticky bit on dir is set we should either
2401 * a. be owner of dir, or
2402 * b. be owner of victim, or
2403 * c. have CAP_FOWNER capability
2404 * 6. If the victim is append-only or immutable we can't do anything with
2405 * links pointing to it.
2406 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
2407 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
2408 * 9. We can't remove a root or mountpoint.
2409 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
2410 * nfs_async_unlink().
2411 */
2412static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
2413{
2414 struct inode *inode = victim->d_inode;
2415 int error;
2416
2417 if (d_is_negative(victim))
2418 return -ENOENT;
2419 BUG_ON(!inode);
2420
2421 BUG_ON(victim->d_parent->d_inode != dir);
2422 audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
2423
2424 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
2425 if (error)
2426 return error;
2427 if (IS_APPEND(dir))
2428 return -EPERM;
2429
2430 if (check_sticky(dir, inode) || IS_APPEND(inode) ||
2431 IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
2432 return -EPERM;
2433 if (isdir) {
2434 if (!d_is_dir(victim))
2435 return -ENOTDIR;
2436 if (IS_ROOT(victim))
2437 return -EBUSY;
2438 } else if (d_is_dir(victim))
2439 return -EISDIR;
2440 if (IS_DEADDIR(dir))
2441 return -ENOENT;
2442 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
2443 return -EBUSY;
2444 return 0;
2445}
2446
2447/* Check whether we can create an object with dentry child in directory
2448 * dir.
2449 * 1. We can't do it if child already exists (open has special treatment for
2450 * this case, but since we are inlined it's OK)
2451 * 2. We can't do it if dir is read-only (done in permission())
2452 * 3. We should have write and exec permissions on dir
2453 * 4. We can't do it if dir is immutable (done in permission())
2454 */
2455static inline int may_create(struct inode *dir, struct dentry *child)
2456{
2457 audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
2458 if (child->d_inode)
2459 return -EEXIST;
2460 if (IS_DEADDIR(dir))
2461 return -ENOENT;
2462 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
2463}
2464
2465/*
2466 * p1 and p2 should be directories on the same fs.
2467 */
2468struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2469{
2470 struct dentry *p;
2471
2472 if (p1 == p2) {
2473 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2474 return NULL;
2475 }
2476
2477 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2478
2479 p = d_ancestor(p2, p1);
2480 if (p) {
2481 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
2482 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
2483 return p;
2484 }
2485
2486 p = d_ancestor(p1, p2);
2487 if (p) {
2488 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2489 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2490 return p;
2491 }
2492
2493 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2494 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2495 return NULL;
2496}
2497EXPORT_SYMBOL(lock_rename);
2498
2499void unlock_rename(struct dentry *p1, struct dentry *p2)
2500{
2501 mutex_unlock(&p1->d_inode->i_mutex);
2502 if (p1 != p2) {
2503 mutex_unlock(&p2->d_inode->i_mutex);
2504 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2505 }
2506}
2507EXPORT_SYMBOL(unlock_rename);
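
/*
 * Illustrative sketch of how a rename-style caller pairs these helpers
 * (identifiers are hypothetical): lock_rename() returns a non-NULL "trap"
 * dentry when one parent directory is a descendant of the other, and the
 * caller must compare the dentries it subsequently looks up against that
 * trap to avoid creating a loop.
 *
 *	trap = lock_rename(new_parent, old_parent);
 *	// ... do the lookups under the locks; bail out if either looked-up
 *	//     dentry equals trap ...
 *	// ... vfs_rename() ...
 *	unlock_rename(new_parent, old_parent);
 */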
2508
2509int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2510 bool want_excl)
2511{
2512 int error = may_create(dir, dentry);
2513 if (error)
2514 return error;
2515
2516 if (!dir->i_op->create)
2517 return -EACCES; /* shouldn't it be ENOSYS? */
2518 mode &= S_IALLUGO;
2519 mode |= S_IFREG;
2520 error = security_inode_create(dir, dentry, mode);
2521 if (error)
2522 return error;
2523 error = dir->i_op->create(dir, dentry, mode, want_excl);
2524 if (!error)
2525 fsnotify_create(dir, dentry);
2526 return error;
2527}
2528EXPORT_SYMBOL(vfs_create);
2529
2530static int may_open(struct path *path, int acc_mode, int flag)
2531{
2532 struct dentry *dentry = path->dentry;
2533 struct inode *inode = dentry->d_inode;
2534 int error;
2535
2536 /* O_PATH? */
2537 if (!acc_mode)
2538 return 0;
2539
2540 if (!inode)
2541 return -ENOENT;
2542
2543 switch (inode->i_mode & S_IFMT) {
2544 case S_IFLNK:
2545 return -ELOOP;
2546 case S_IFDIR:
2547 if (acc_mode & MAY_WRITE)
2548 return -EISDIR;
2549 break;
2550 case S_IFBLK:
2551 case S_IFCHR:
2552 if (path->mnt->mnt_flags & MNT_NODEV)
2553 return -EACCES;
2554 /*FALLTHRU*/
2555 case S_IFIFO:
2556 case S_IFSOCK:
2557 flag &= ~O_TRUNC;
2558 break;
2559 }
2560
2561 error = inode_permission(inode, acc_mode);
2562 if (error)
2563 return error;
2564
2565 /*
2566 * An append-only file must be opened in append mode for writing.
2567 */
2568 if (IS_APPEND(inode)) {
2569 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
2570 return -EPERM;
2571 if (flag & O_TRUNC)
2572 return -EPERM;
2573 }
2574
2575 /* O_NOATIME can only be set by the owner or superuser */
2576 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
2577 return -EPERM;
2578
2579 return 0;
2580}
2581
2582static int handle_truncate(struct file *filp)
2583{
2584 struct path *path = &filp->f_path;
2585 struct inode *inode = path->dentry->d_inode;
2586 int error = get_write_access(inode);
2587 if (error)
2588 return error;
2589 /*
2590 * Refuse to truncate files with mandatory locks held on them.
2591 */
2592 error = locks_verify_locked(filp);
2593 if (!error)
2594 error = security_path_truncate(path);
2595 if (!error) {
2596 error = do_truncate(path->dentry, 0,
2597 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
2598 filp);
2599 }
2600 put_write_access(inode);
2601 return error;
2602}
2603
2604static inline int open_to_namei_flags(int flag)
2605{
2606 if ((flag & O_ACCMODE) == 3)
2607 flag--;
2608 return flag;
2609}
2610
2611static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
2612{
2613 int error = security_path_mknod(dir, dentry, mode, 0);
2614 if (error)
2615 return error;
2616
2617 error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
2618 if (error)
2619 return error;
2620
2621 return security_inode_create(dir->dentry->d_inode, dentry, mode);
2622}
2623
2624/*
2625 * Attempt to atomically look up, create and open a file from a negative
2626 * dentry.
2627 *
2628 * Returns 0 if successful. The file will have been created and attached to
2629 * @file by the filesystem calling finish_open().
2630 *
2631 * Returns 1 if the file was looked up only or didn't need creating. The
2632 * caller will need to perform the open themselves. @path will have been
2633 * updated to point to the new dentry. This may be negative.
2634 *
2635 * Returns an error code otherwise.
2636 */
2637static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2638 struct path *path, struct file *file,
2639 const struct open_flags *op,
2640 bool got_write, bool need_lookup,
2641 int *opened)
2642{
2643 struct inode *dir = nd->path.dentry->d_inode;
2644 unsigned open_flag = open_to_namei_flags(op->open_flag);
2645 umode_t mode;
2646 int error;
2647 int acc_mode;
2648 int create_error = 0;
2649 struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
2650 bool excl;
2651
2652 BUG_ON(dentry->d_inode);
2653
2654 /* Don't create child dentry for a dead directory. */
2655 if (unlikely(IS_DEADDIR(dir))) {
2656 error = -ENOENT;
2657 goto out;
2658 }
2659
2660 mode = op->mode;
2661 if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
2662 mode &= ~current_umask();
2663
2664 excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
2665 if (excl)
2666 open_flag &= ~O_TRUNC;
2667
2668 /*
2669 * Checking write permission is tricky, because we don't know if we are
2670 * going to actually need it: O_CREAT opens should work as long as the
2671 * file exists. But checking existence breaks atomicity. The trick is
2672 * to check access and if not granted clear O_CREAT from the flags.
2673 *
2674 * Another problem is returning the "right" error value (e.g. for an
2675 * O_EXCL open we want to return EEXIST not EROFS).
2676 */
2677 if (((open_flag & (O_CREAT | O_TRUNC)) ||
2678 (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
2679 if (!(open_flag & O_CREAT)) {
2680 /*
2681 * No O_CREAT -> atomicity not a requirement -> fall
2682 * back to lookup + open
2683 */
2684 goto no_open;
2685 } else if (open_flag & (O_EXCL | O_TRUNC)) {
2686 /* Fall back and fail with the right error */
2687 create_error = -EROFS;
2688 goto no_open;
2689 } else {
2690 /* No side effects, safe to clear O_CREAT */
2691 create_error = -EROFS;
2692 open_flag &= ~O_CREAT;
2693 }
2694 }
2695
2696 if (open_flag & O_CREAT) {
2697 error = may_o_create(&nd->path, dentry, mode);
2698 if (error) {
2699 create_error = error;
2700 if (open_flag & O_EXCL)
2701 goto no_open;
2702 open_flag &= ~O_CREAT;
2703 }
2704 }
2705
2706 if (nd->flags & LOOKUP_DIRECTORY)
2707 open_flag |= O_DIRECTORY;
2708
2709 file->f_path.dentry = DENTRY_NOT_SET;
2710 file->f_path.mnt = nd->path.mnt;
2711 error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
2712 opened);
2713 if (error < 0) {
2714 if (create_error && error == -ENOENT)
2715 error = create_error;
2716 goto out;
2717 }
2718
2719 if (error) { /* returned 1, that is */
2720 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
2721 error = -EIO;
2722 goto out;
2723 }
2724 if (file->f_path.dentry) {
2725 dput(dentry);
2726 dentry = file->f_path.dentry;
2727 }
2728 if (*opened & FILE_CREATED)
2729 fsnotify_create(dir, dentry);
2730 if (!dentry->d_inode) {
2731 WARN_ON(*opened & FILE_CREATED);
2732 if (create_error) {
2733 error = create_error;
2734 goto out;
2735 }
2736 } else {
2737 if (excl && !(*opened & FILE_CREATED)) {
2738 error = -EEXIST;
2739 goto out;
2740 }
2741 }
2742 goto looked_up;
2743 }
2744
2745 /*
2746 * We didn't have the inode before the open, so check open permission
2747 * here.
2748 */
2749 acc_mode = op->acc_mode;
2750 if (*opened & FILE_CREATED) {
2751 WARN_ON(!(open_flag & O_CREAT));
2752 fsnotify_create(dir, dentry);
2753 acc_mode = MAY_OPEN;
2754 }
2755 error = may_open(&file->f_path, acc_mode, open_flag);
2756 if (error)
2757 fput(file);
2758
2759out:
2760 dput(dentry);
2761 return error;
2762
2763no_open:
2764 if (need_lookup) {
2765 dentry = lookup_real(dir, dentry, nd->flags);
2766 if (IS_ERR(dentry))
2767 return PTR_ERR(dentry);
2768
2769 if (create_error) {
2770 int open_flag = op->open_flag;
2771
2772 error = create_error;
2773 if ((open_flag & O_EXCL)) {
2774 if (!dentry->d_inode)
2775 goto out;
2776 } else if (!dentry->d_inode) {
2777 goto out;
2778 } else if ((open_flag & O_TRUNC) &&
2779 S_ISREG(dentry->d_inode->i_mode)) {
2780 goto out;
2781 }
2782 /* will fail later, go on to get the right error */
2783 }
2784 }
2785looked_up:
2786 path->dentry = dentry;
2787 path->mnt = nd->path.mnt;
2788 return 1;
2789}
2790
2791/*
2792 * Look up and maybe create and open the last component.
2793 *
2794 * Must be called with i_mutex held on parent.
2795 *
2796 * Returns 0 if the file was successfully atomically created (if necessary) and
2797 * opened. In this case the file will be returned attached to @file.
2798 *
2799 * Returns 1 if the file was not completely opened at this time, though lookups
2800 * and creations will have been performed and the dentry returned in @path will
2801 * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
2802 * specified then a negative dentry may be returned.
2803 *
2804 * An error code is returned otherwise.
2805 *
2806 * FILE_CREATED will be set in @*opened if the dentry was created and will be
2807 * cleared otherwise prior to returning.
2808 */
2809static int lookup_open(struct nameidata *nd, struct path *path,
2810 struct file *file,
2811 const struct open_flags *op,
2812 bool got_write, int *opened)
2813{
2814 struct dentry *dir = nd->path.dentry;
2815 struct inode *dir_inode = dir->d_inode;
2816 struct dentry *dentry;
2817 int error;
2818 bool need_lookup;
2819
2820 *opened &= ~FILE_CREATED;
2821 dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup);
2822 if (IS_ERR(dentry))
2823 return PTR_ERR(dentry);
2824
2825 /* Cached positive dentry: will open in f_op->open */
2826 if (!need_lookup && dentry->d_inode)
2827 goto out_no_open;
2828
2829 if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
2830 return atomic_open(nd, dentry, path, file, op, got_write,
2831 need_lookup, opened);
2832 }
2833
2834 if (need_lookup) {
2835 BUG_ON(dentry->d_inode);
2836
2837 dentry = lookup_real(dir_inode, dentry, nd->flags);
2838 if (IS_ERR(dentry))
2839 return PTR_ERR(dentry);
2840 }
2841
2842 /* Negative dentry, just create the file */
2843 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
2844 umode_t mode = op->mode;
2845 if (!IS_POSIXACL(dir->d_inode))
2846 mode &= ~current_umask();
2847 /*
2848 * This write is needed to ensure that a
2849 * rw->ro transition does not occur between
2850 * the time when the file is created and when
2851 * a permanent write count is taken through
2852 * the 'struct file' in finish_open().
2853 */
2854 if (!got_write) {
2855 error = -EROFS;
2856 goto out_dput;
2857 }
2858 *opened |= FILE_CREATED;
2859 error = security_path_mknod(&nd->path, dentry, mode, 0);
2860 if (error)
2861 goto out_dput;
2862 error = vfs_create(dir->d_inode, dentry, mode,
2863 nd->flags & LOOKUP_EXCL);
2864 if (error)
2865 goto out_dput;
2866 }
2867out_no_open:
2868 path->dentry = dentry;
2869 path->mnt = nd->path.mnt;
2870 return 1;
2871
2872out_dput:
2873 dput(dentry);
2874 return error;
2875}
2876
2877/*
2878 * Handle the last step of open()
2879 */
2880static int do_last(struct nameidata *nd, struct path *path,
2881 struct file *file, const struct open_flags *op,
2882 int *opened, struct filename *name)
2883{
2884 struct dentry *dir = nd->path.dentry;
2885 int open_flag = op->open_flag;
2886 bool will_truncate = (open_flag & O_TRUNC) != 0;
2887 bool got_write = false;
2888 int acc_mode = op->acc_mode;
2889 struct inode *inode;
2890 bool symlink_ok = false;
2891 struct path save_parent = { .dentry = NULL, .mnt = NULL };
2892 bool retried = false;
2893 int error;
2894
2895 nd->flags &= ~LOOKUP_PARENT;
2896 nd->flags |= op->intent;
2897
2898 if (nd->last_type != LAST_NORM) {
2899 error = handle_dots(nd, nd->last_type);
2900 if (error)
2901 return error;
2902 goto finish_open;
2903 }
2904
2905 if (!(open_flag & O_CREAT)) {
2906 if (nd->last.name[nd->last.len])
2907 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2908 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
2909 symlink_ok = true;
2910 /* we _can_ be in RCU mode here */
2911 error = lookup_fast(nd, path, &inode);
2912 if (likely(!error))
2913 goto finish_lookup;
2914
2915 if (error < 0)
2916 goto out;
2917
2918 BUG_ON(nd->inode != dir->d_inode);
2919 } else {
2920 /* create side of things */
2921 /*
2922 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
2923 * has been cleared when we got to the last component we are
2924 * about to look up
2925 */
2926 error = complete_walk(nd);
2927 if (error)
2928 return error;
2929
2930 audit_inode(name, dir, LOOKUP_PARENT);
2931 error = -EISDIR;
2932 /* trailing slashes? */
2933 if (nd->last.name[nd->last.len])
2934 goto out;
2935 }
2936
2937retry_lookup:
2938 if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
2939 error = mnt_want_write(nd->path.mnt);
2940 if (!error)
2941 got_write = true;
2942 /*
2943 * do _not_ fail yet - we might not need that or fail with
2944 * a different error; let lookup_open() decide; we'll be
2945 * dropping this one anyway.
2946 */
2947 }
2948 mutex_lock(&dir->d_inode->i_mutex);
2949 error = lookup_open(nd, path, file, op, got_write, opened);
2950 mutex_unlock(&dir->d_inode->i_mutex);
2951
2952 if (error <= 0) {
2953 if (error)
2954 goto out;
2955
2956 if ((*opened & FILE_CREATED) ||
2957 !S_ISREG(file_inode(file)->i_mode))
2958 will_truncate = false;
2959
2960 audit_inode(name, file->f_path.dentry, 0);
2961 goto opened;
2962 }
2963
2964 if (*opened & FILE_CREATED) {
2965 /* Don't check for write permission, don't truncate */
2966 open_flag &= ~O_TRUNC;
2967 will_truncate = false;
2968 acc_mode = MAY_OPEN;
2969 path_to_nameidata(path, nd);
2970 goto finish_open_created;
2971 }
2972
2973 /*
2974 * create/update the audit record if the file already exists.
2975 */
2976 if (d_is_positive(path->dentry))
2977 audit_inode(name, path->dentry, 0);
2978
2979 /*
2980 * If atomic_open() acquired write access it is dropped now due to
2981 * possible mount and symlink following (this might be optimized away if
2982 * necessary...)
2983 */
2984 if (got_write) {
2985 mnt_drop_write(nd->path.mnt);
2986 got_write = false;
2987 }
2988
2989 error = -EEXIST;
2990 if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
2991 goto exit_dput;
2992
2993 error = follow_managed(path, nd->flags);
2994 if (error < 0)
2995 goto exit_dput;
2996
2997 if (error)
2998 nd->flags |= LOOKUP_JUMPED;
2999
3000 BUG_ON(nd->flags & LOOKUP_RCU);
3001 inode = path->dentry->d_inode;
3002finish_lookup:
3003 /* we _can_ be in RCU mode here */
3004 error = -ENOENT;
3005 if (!inode || d_is_negative(path->dentry)) {
3006 path_to_nameidata(path, nd);
3007 goto out;
3008 }
3009
3010 if (should_follow_link(path->dentry, !symlink_ok)) {
3011 if (nd->flags & LOOKUP_RCU) {
3012 if (unlikely(unlazy_walk(nd, path->dentry))) {
3013 error = -ECHILD;
3014 goto out;
3015 }
3016 }
3017 BUG_ON(inode != path->dentry->d_inode);
3018 return 1;
3019 }
3020
3021 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
3022 path_to_nameidata(path, nd);
3023 } else {
3024 save_parent.dentry = nd->path.dentry;
3025 save_parent.mnt = mntget(path->mnt);
3026 nd->path.dentry = path->dentry;
3027
3028 }
3029 nd->inode = inode;
3030 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
3031finish_open:
3032 error = complete_walk(nd);
3033 if (error) {
3034 path_put(&save_parent);
3035 return error;
3036 }
3037 audit_inode(name, nd->path.dentry, 0);
3038 error = -EISDIR;
3039 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
3040 goto out;
3041 error = -ENOTDIR;
3042 if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
3043 goto out;
3044 if (!S_ISREG(nd->inode->i_mode))
3045 will_truncate = false;
3046
3047 if (will_truncate) {
3048 error = mnt_want_write(nd->path.mnt);
3049 if (error)
3050 goto out;
3051 got_write = true;
3052 }
3053finish_open_created:
3054 error = may_open(&nd->path, acc_mode, open_flag);
3055 if (error)
3056 goto out;
3057 file->f_path.mnt = nd->path.mnt;
3058 error = finish_open(file, nd->path.dentry, NULL, opened);
3059 if (error) {
3060 if (error == -EOPENSTALE)
3061 goto stale_open;
3062 goto out;
3063 }
3064opened:
3065 error = open_check_o_direct(file);
3066 if (error)
3067 goto exit_fput;
3068 error = ima_file_check(file, op->acc_mode);
3069 if (error)
3070 goto exit_fput;
3071
3072 if (will_truncate) {
3073 error = handle_truncate(file);
3074 if (error)
3075 goto exit_fput;
3076 }
3077out:
3078 if (got_write)
3079 mnt_drop_write(nd->path.mnt);
3080 path_put(&save_parent);
3081 terminate_walk(nd);
3082 return error;
3083
3084exit_dput:
3085 path_put_conditional(path, nd);
3086 goto out;
3087exit_fput:
3088 fput(file);
3089 goto out;
3090
3091stale_open:
3092 /* If no saved parent or already retried then can't retry */
3093 if (!save_parent.dentry || retried)
3094 goto out;
3095
3096 BUG_ON(save_parent.dentry != dir);
3097 path_put(&nd->path);
3098 nd->path = save_parent;
3099 nd->inode = dir->d_inode;
3100 save_parent.mnt = NULL;
3101 save_parent.dentry = NULL;
3102 if (got_write) {
3103 mnt_drop_write(nd->path.mnt);
3104 got_write = false;
3105 }
3106 retried = true;
3107 goto retry_lookup;
3108}
3109
3110static int do_tmpfile(int dfd, struct filename *pathname,
3111 struct nameidata *nd, int flags,
3112 const struct open_flags *op,
3113 struct file *file, int *opened)
3114{
3115 static const struct qstr name = QSTR_INIT("/", 1);
3116 struct dentry *dentry, *child;
3117 struct inode *dir;
3118 int error = path_lookupat(dfd, pathname->name,
3119 flags | LOOKUP_DIRECTORY, nd);
3120 if (unlikely(error))
3121 return error;
3122 error = mnt_want_write(nd->path.mnt);
3123 if (unlikely(error))
3124 goto out;
3125 /* we want directory to be writable */
3126 error = inode_permission(nd->inode, MAY_WRITE | MAY_EXEC);
3127 if (error)
3128 goto out2;
3129 dentry = nd->path.dentry;
3130 dir = dentry->d_inode;
3131 if (!dir->i_op->tmpfile) {
3132 error = -EOPNOTSUPP;
3133 goto out2;
3134 }
3135 child = d_alloc(dentry, &name);
3136 if (unlikely(!child)) {
3137 error = -ENOMEM;
3138 goto out2;
3139 }
3140 nd->flags &= ~LOOKUP_DIRECTORY;
3141 nd->flags |= op->intent;
3142 dput(nd->path.dentry);
3143 nd->path.dentry = child;
3144 error = dir->i_op->tmpfile(dir, nd->path.dentry, op->mode);
3145 if (error)
3146 goto out2;
3147 audit_inode(pathname, nd->path.dentry, 0);
3148 error = may_open(&nd->path, op->acc_mode, op->open_flag);
3149 if (error)
3150 goto out2;
3151 file->f_path.mnt = nd->path.mnt;
3152 error = finish_open(file, nd->path.dentry, NULL, opened);
3153 if (error)
3154 goto out2;
3155 error = open_check_o_direct(file);
3156 if (error) {
3157 fput(file);
3158 } else if (!(op->open_flag & O_EXCL)) {
3159 struct inode *inode = file_inode(file);
3160 spin_lock(&inode->i_lock);
3161 inode->i_state |= I_LINKABLE;
3162 spin_unlock(&inode->i_lock);
3163 }
3164out2:
3165 mnt_drop_write(nd->path.mnt);
3166out:
3167 path_put(&nd->path);
3168 return error;
3169}
3170
3171static struct file *path_openat(int dfd, struct filename *pathname,
3172 struct nameidata *nd, const struct open_flags *op, int flags)
3173{
3174 struct file *base = NULL;
3175 struct file *file;
3176 struct path path;
3177 int opened = 0;
3178 int error;
3179
3180 file = get_empty_filp();
3181 if (IS_ERR(file))
3182 return file;
3183
3184 file->f_flags = op->open_flag;
3185
3186 if (unlikely(file->f_flags & __O_TMPFILE)) {
3187 error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
3188 goto out;
3189 }
3190
3191 error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
3192 if (unlikely(error))
3193 goto out;
3194
3195 current->total_link_count = 0;
3196 error = link_path_walk(pathname->name, nd);
3197 if (unlikely(error))
3198 goto out;
3199
3200 error = do_last(nd, &path, file, op, &opened, pathname);
3201 while (unlikely(error > 0)) { /* trailing symlink */
3202 struct path link = path;
3203 void *cookie;
3204 if (!(nd->flags & LOOKUP_FOLLOW)) {
3205 path_put_conditional(&path, nd);
3206 path_put(&nd->path);
3207 error = -ELOOP;
3208 break;
3209 }
3210 error = may_follow_link(&link, nd);
3211 if (unlikely(error))
3212 break;
3213 nd->flags |= LOOKUP_PARENT;
3214 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
3215 error = follow_link(&link, nd, &cookie);
3216 if (unlikely(error))
3217 break;
3218 error = do_last(nd, &path, file, op, &opened, pathname);
3219 put_link(nd, &link, cookie);
3220 }
3221out:
3222 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
3223 path_put(&nd->root);
3224 if (base)
3225 fput(base);
3226 if (!(opened & FILE_OPENED)) {
3227 BUG_ON(!error);
3228 put_filp(file);
3229 }
3230 if (unlikely(error)) {
3231 if (error == -EOPENSTALE) {
3232 if (flags & LOOKUP_RCU)
3233 error = -ECHILD;
3234 else
3235 error = -ESTALE;
3236 }
3237 file = ERR_PTR(error);
3238 }
3239 return file;
3240}
3241
3242struct file *do_filp_open(int dfd, struct filename *pathname,
3243 const struct open_flags *op)
3244{
3245 struct nameidata nd;
3246 int flags = op->lookup_flags;
3247 struct file *filp;
3248
3249 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
3250 if (unlikely(filp == ERR_PTR(-ECHILD)))
3251 filp = path_openat(dfd, pathname, &nd, op, flags);
3252 if (unlikely(filp == ERR_PTR(-ESTALE)))
3253 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
3254 return filp;
3255}
3256
3257struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
3258 const char *name, const struct open_flags *op)
3259{
3260 struct nameidata nd;
3261 struct file *file;
3262 struct filename filename = { .name = name };
3263 int flags = op->lookup_flags | LOOKUP_ROOT;
3264
3265 nd.root.mnt = mnt;
3266 nd.root.dentry = dentry;
3267
3268 if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
3269 return ERR_PTR(-ELOOP);
3270
3271 file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_RCU);
3272 if (unlikely(file == ERR_PTR(-ECHILD)))
3273 file = path_openat(-1, &filename, &nd, op, flags);
3274 if (unlikely(file == ERR_PTR(-ESTALE)))
3275 file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_REVAL);
3276 return file;
3277}
3278
3279struct dentry *kern_path_create(int dfd, const char *pathname,
3280 struct path *path, unsigned int lookup_flags)
3281{
3282 struct dentry *dentry = ERR_PTR(-EEXIST);
3283 struct nameidata nd;
3284 int err2;
3285 int error;
3286 bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
3287
3288 /*
3289 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
3290 * other flags passed in are ignored!
3291 */
3292 lookup_flags &= LOOKUP_REVAL;
3293
3294 error = do_path_lookup(dfd, pathname, LOOKUP_PARENT|lookup_flags, &nd);
3295 if (error)
3296 return ERR_PTR(error);
3297
3298 /*
3299 * Yucky last component or no last component at all?
3300 * (foo/., foo/.., /////)
3301 */
3302 if (nd.last_type != LAST_NORM)
3303 goto out;
3304 nd.flags &= ~LOOKUP_PARENT;
3305 nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
3306
3307 /* don't fail immediately if it's r/o, at least try to report other errors */
3308 err2 = mnt_want_write(nd.path.mnt);
3309 /*
3310 * Do the final lookup.
3311 */
3312 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
3313 dentry = lookup_hash(&nd);
3314 if (IS_ERR(dentry))
3315 goto unlock;
3316
3317 error = -EEXIST;
3318 if (d_is_positive(dentry))
3319 goto fail;
3320
3321 /*
3322 * Special case - lookup gave negative, but... we had foo/bar/
3323 * From the vfs_mknod() POV we just have a negative dentry -
3324 * all is fine. Let's be bastards - you had / on the end, you've
3325 * been asking for (non-existent) directory. -ENOENT for you.
3326 */
3327 if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
3328 error = -ENOENT;
3329 goto fail;
3330 }
3331 if (unlikely(err2)) {
3332 error = err2;
3333 goto fail;
3334 }
3335 *path = nd.path;
3336 return dentry;
3337fail:
3338 dput(dentry);
3339 dentry = ERR_PTR(error);
3340unlock:
3341 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
3342 if (!err2)
3343 mnt_drop_write(nd.path.mnt);
3344out:
3345 path_put(&nd.path);
3346 return dentry;
3347}
3348EXPORT_SYMBOL(kern_path_create);
3349
3350void done_path_create(struct path *path, struct dentry *dentry)
3351{
3352 dput(dentry);
3353 mutex_unlock(&path->dentry->d_inode->i_mutex);
3354 mnt_drop_write(path->mnt);
3355 path_put(path);
3356}
3357EXPORT_SYMBOL(done_path_create);
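
/*
 * Illustrative sketch (the pathname and variable names are made up):
 * kern_path_create() returns a negative dentry with the parent directory's
 * i_mutex held and a write reference taken on the mount, so a creation
 * helper such as vfs_create() can be called directly; done_path_create()
 * then releases everything in the right order.
 *
 *	struct path parent;
 *	struct dentry *dentry;
 *	int err;
 *
 *	dentry = kern_path_create(AT_FDCWD, "/tmp/example", &parent, 0);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	err = vfs_create(parent.dentry->d_inode, dentry, 0600, true);
 *	done_path_create(&parent, dentry);
 *	return err;
 */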
3358
3359struct dentry *user_path_create(int dfd, const char __user *pathname,
3360 struct path *path, unsigned int lookup_flags)
3361{
3362 struct filename *tmp = getname(pathname);
3363 struct dentry *res;
3364 if (IS_ERR(tmp))
3365 return ERR_CAST(tmp);
3366 res = kern_path_create(dfd, tmp->name, path, lookup_flags);
3367 putname(tmp);
3368 return res;
3369}
3370EXPORT_SYMBOL(user_path_create);
3371
3372int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
3373{
3374 int error = may_create(dir, dentry);
3375
3376 if (error)
3377 return error;
3378
3379 if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
3380 return -EPERM;
3381
3382 if (!dir->i_op->mknod)
3383 return -EPERM;
3384
3385 error = devcgroup_inode_mknod(mode, dev);
3386 if (error)
3387 return error;
3388
3389 error = security_inode_mknod(dir, dentry, mode, dev);
3390 if (error)
3391 return error;
3392
3393 error = dir->i_op->mknod(dir, dentry, mode, dev);
3394 if (!error)
3395 fsnotify_create(dir, dentry);
3396 return error;
3397}
3398EXPORT_SYMBOL(vfs_mknod);
3399
3400static int may_mknod(umode_t mode)
3401{
3402 switch (mode & S_IFMT) {
3403 case S_IFREG:
3404 case S_IFCHR:
3405 case S_IFBLK:
3406 case S_IFIFO:
3407 case S_IFSOCK:
3408 case 0: /* zero mode translates to S_IFREG */
3409 return 0;
3410 case S_IFDIR:
3411 return -EPERM;
3412 default:
3413 return -EINVAL;
3414 }
3415}
3416
3417SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
3418 unsigned, dev)
3419{
3420 struct dentry *dentry;
3421 struct path path;
3422 int error;
3423 unsigned int lookup_flags = 0;
3424
3425 error = may_mknod(mode);
3426 if (error)
3427 return error;
3428retry:
3429 dentry = user_path_create(dfd, filename, &path, lookup_flags);
3430 if (IS_ERR(dentry))
3431 return PTR_ERR(dentry);
3432
3433 if (!IS_POSIXACL(path.dentry->d_inode))
3434 mode &= ~current_umask();
3435 error = security_path_mknod(&path, dentry, mode, dev);
3436 if (error)
3437 goto out;
3438 switch (mode & S_IFMT) {
3439 case 0: case S_IFREG:
3440 error = vfs_create(path.dentry->d_inode,dentry,mode,true);
3441 break;
3442 case S_IFCHR: case S_IFBLK:
3443 error = vfs_mknod(path.dentry->d_inode,dentry,mode,
3444 new_decode_dev(dev));
3445 break;
3446 case S_IFIFO: case S_IFSOCK:
3447 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
3448 break;
3449 }
3450out:
3451 done_path_create(&path, dentry);
3452 if (retry_estale(error, lookup_flags)) {
3453 lookup_flags |= LOOKUP_REVAL;
3454 goto retry;
3455 }
3456 return error;
3457}
3458
3459SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
3460{
3461 return sys_mknodat(AT_FDCWD, filename, mode, dev);
3462}
3463
3464int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3465{
3466 int error = may_create(dir, dentry);
3467 unsigned max_links = dir->i_sb->s_max_links;
3468
3469 if (error)
3470 return error;
3471
3472 if (!dir->i_op->mkdir)
3473 return -EPERM;
3474
3475 mode &= (S_IRWXUGO|S_ISVTX);
3476 error = security_inode_mkdir(dir, dentry, mode);
3477 if (error)
3478 return error;
3479
3480 if (max_links && dir->i_nlink >= max_links)
3481 return -EMLINK;
3482
3483 error = dir->i_op->mkdir(dir, dentry, mode);
3484 if (!error)
3485 fsnotify_mkdir(dir, dentry);
3486 return error;
3487}
3488EXPORT_SYMBOL(vfs_mkdir);
3489
3490SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
3491{
3492 struct dentry *dentry;
3493 struct path path;
3494 int error;
3495 unsigned int lookup_flags = LOOKUP_DIRECTORY;
3496
3497retry:
3498 dentry = user_path_create(dfd, pathname, &path, lookup_flags);
3499 if (IS_ERR(dentry))
3500 return PTR_ERR(dentry);
3501
3502 if (!IS_POSIXACL(path.dentry->d_inode))
3503 mode &= ~current_umask();
3504 error = security_path_mkdir(&path, dentry, mode);
3505 if (!error)
3506 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
3507 done_path_create(&path, dentry);
3508 if (retry_estale(error, lookup_flags)) {
3509 lookup_flags |= LOOKUP_REVAL;
3510 goto retry;
3511 }
3512 return error;
3513}
3514
3515SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
3516{
3517 return sys_mkdirat(AT_FDCWD, pathname, mode);
3518}
3519
3520/*
3521 * The dentry_unhash() helper will try to drop the dentry early: we
3522 * should have a usage count of 1 if we're the only user of this
3523 * dentry, and if that is true (possibly after pruning the dcache),
3524 * then we drop the dentry now.
3525 *
3526 * A low-level filesystem can, if it chooses, legally
3527 * do a
3528 *
3529 * if (!d_unhashed(dentry))
3530 * return -EBUSY;
3531 *
3532 * if it cannot handle the case of removing a directory
3533 * that is still in use by something else..
3534 */
3535void dentry_unhash(struct dentry *dentry)
3536{
3537 shrink_dcache_parent(dentry);
3538 spin_lock(&dentry->d_lock);
3539 if (dentry->d_lockref.count == 1)
3540 __d_drop(dentry);
3541 spin_unlock(&dentry->d_lock);
3542}
3543EXPORT_SYMBOL(dentry_unhash);
3544
3545int vfs_rmdir(struct inode *dir, struct dentry *dentry)
3546{
3547 int error = may_delete(dir, dentry, 1);
3548
3549 if (error)
3550 return error;
3551
3552 if (!dir->i_op->rmdir)
3553 return -EPERM;
3554
3555 dget(dentry);
3556 mutex_lock(&dentry->d_inode->i_mutex);
3557
3558 error = -EBUSY;
3559 if (d_mountpoint(dentry))
3560 goto out;
3561
3562 error = security_inode_rmdir(dir, dentry);
3563 if (error)
3564 goto out;
3565
3566 shrink_dcache_parent(dentry);
3567 error = dir->i_op->rmdir(dir, dentry);
3568 if (error)
3569 goto out;
3570
3571 dentry->d_inode->i_flags |= S_DEAD;
3572 dont_mount(dentry);
3573
3574out:
3575 mutex_unlock(&dentry->d_inode->i_mutex);
3576 dput(dentry);
3577 if (!error)
3578 d_delete(dentry);
3579 return error;
3580}
3581EXPORT_SYMBOL(vfs_rmdir);
3582
3583static long do_rmdir(int dfd, const char __user *pathname)
3584{
3585 int error = 0;
3586 struct filename *name;
3587 struct dentry *dentry;
3588 struct nameidata nd;
3589 unsigned int lookup_flags = 0;
3590retry:
3591 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
3592 if (IS_ERR(name))
3593 return PTR_ERR(name);
3594
3595 switch(nd.last_type) {
3596 case LAST_DOTDOT:
3597 error = -ENOTEMPTY;
3598 goto exit1;
3599 case LAST_DOT:
3600 error = -EINVAL;
3601 goto exit1;
3602 case LAST_ROOT:
3603 error = -EBUSY;
3604 goto exit1;
3605 }
3606
3607 nd.flags &= ~LOOKUP_PARENT;
3608 error = mnt_want_write(nd.path.mnt);
3609 if (error)
3610 goto exit1;
3611
3612 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
3613 dentry = lookup_hash(&nd);
3614 error = PTR_ERR(dentry);
3615 if (IS_ERR(dentry))
3616 goto exit2;
3617 if (!dentry->d_inode) {
3618 error = -ENOENT;
3619 goto exit3;
3620 }
3621 error = security_path_rmdir(&nd.path, dentry);
3622 if (error)
3623 goto exit3;
3624 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
3625exit3:
3626 dput(dentry);
3627exit2:
3628 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
3629 mnt_drop_write(nd.path.mnt);
3630exit1:
3631 path_put(&nd.path);
3632 putname(name);
3633 if (retry_estale(error, lookup_flags)) {
3634 lookup_flags |= LOOKUP_REVAL;
3635 goto retry;
3636 }
3637 return error;
3638}
3639
3640SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
3641{
3642 return do_rmdir(AT_FDCWD, pathname);
3643}
3644
3645/**
3646 * vfs_unlink - unlink a filesystem object
3647 * @dir: parent directory
3648 * @dentry: victim
3649 * @delegated_inode: returns victim inode, if the inode is delegated.
3650 *
3651 * The caller must hold dir->i_mutex.
3652 *
3653 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
3654 * return a reference to the inode in delegated_inode. The caller
3655 * should then break the delegation on that inode and retry. Because
3656 * breaking a delegation may take a long time, the caller should drop
3657 * dir->i_mutex before doing so.
3658 *
3659 * Alternatively, a caller may pass NULL for delegated_inode. This may
3660 * be appropriate for callers that expect the underlying filesystem not
3661 * to be NFS exported.
3662 */
3663int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
3664{
3665 struct inode *target = dentry->d_inode;
3666 int error = may_delete(dir, dentry, 0);
3667
3668 if (error)
3669 return error;
3670
3671 if (!dir->i_op->unlink)
3672 return -EPERM;
3673
3674 mutex_lock(&target->i_mutex);
3675 if (d_mountpoint(dentry))
3676 error = -EBUSY;
3677 else {
3678 error = security_inode_unlink(dir, dentry);
3679 if (!error) {
3680 error = try_break_deleg(target, delegated_inode);
3681 if (error)
3682 goto out;
3683 error = dir->i_op->unlink(dir, dentry);
3684 if (!error)
3685 dont_mount(dentry);
3686 }
3687 }
3688out:
3689 mutex_unlock(&target->i_mutex);
3690
3691 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
3692 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
3693 fsnotify_link_count(target);
3694 d_delete(dentry);
3695 }
3696
3697 return error;
3698}
3699EXPORT_SYMBOL(vfs_unlink);
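
/*
 * Illustrative sketch of the delegation protocol documented above (names
 * are hypothetical; do_unlinkat() below is the real user): when
 * *delegated_inode comes back non-NULL, drop the parent's i_mutex, wait
 * for the delegation to be broken, and retry the whole locked sequence.
 *
 *retry:
 *	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
 *	// ... look up the victim dentry under the lock ...
 *	error = vfs_unlink(dir->d_inode, victim, &delegated_inode);
 *	dput(victim);
 *	mutex_unlock(&dir->d_inode->i_mutex);
 *	if (delegated_inode) {
 *		error = break_deleg_wait(&delegated_inode);
 *		if (!error)
 *			goto retry;
 *	}
 */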
3700
3701/*
3702 * Make sure that the actual truncation of the file will occur outside its
3703 * directory's i_mutex. Truncate can take a long time if there is a lot of
3704 * writeout happening, and we don't want to prevent access to the directory
3705 * while waiting on the I/O.
3706 */
3707static long do_unlinkat(int dfd, const char __user *pathname)
3708{
3709 int error;
3710 struct filename *name;
3711 struct dentry *dentry;
3712 struct nameidata nd;
3713 struct inode *inode = NULL;
3714 struct inode *delegated_inode = NULL;
3715 unsigned int lookup_flags = 0;
3716retry:
3717 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
3718 if (IS_ERR(name))
3719 return PTR_ERR(name);
3720
3721 error = -EISDIR;
3722 if (nd.last_type != LAST_NORM)
3723 goto exit1;
3724
3725 nd.flags &= ~LOOKUP_PARENT;
3726 error = mnt_want_write(nd.path.mnt);
3727 if (error)
3728 goto exit1;
3729retry_deleg:
3730 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
3731 dentry = lookup_hash(&nd);
3732 error = PTR_ERR(dentry);
3733 if (!IS_ERR(dentry)) {
3734 /* Why not before? Because we want correct error value */
3735 if (nd.last.name[nd.last.len])
3736 goto slashes;
3737 inode = dentry->d_inode;
3738 if (d_is_negative(dentry))
3739 goto slashes;
3740 ihold(inode);
3741 error = security_path_unlink(&nd.path, dentry);
3742 if (error)
3743 goto exit2;
3744 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
3745exit2:
3746 dput(dentry);
3747 }
3748 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
3749 if (inode)
3750 iput(inode); /* truncate the inode here */
3751 inode = NULL;
3752 if (delegated_inode) {
3753 error = break_deleg_wait(&delegated_inode);
3754 if (!error)
3755 goto retry_deleg;
3756 }
3757 mnt_drop_write(nd.path.mnt);
3758exit1:
3759 path_put(&nd.path);
3760 putname(name);
3761 if (retry_estale(error, lookup_flags)) {
3762 lookup_flags |= LOOKUP_REVAL;
3763 inode = NULL;
3764 goto retry;
3765 }
3766 return error;
3767
3768slashes:
3769 if (d_is_negative(dentry))
3770 error = -ENOENT;
3771 else if (d_is_dir(dentry))
3772 error = -EISDIR;
3773 else
3774 error = -ENOTDIR;
3775 goto exit2;
3776}
3777
3778SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
3779{
3780 if ((flag & ~AT_REMOVEDIR) != 0)
3781 return -EINVAL;
3782
3783 if (flag & AT_REMOVEDIR)
3784 return do_rmdir(dfd, pathname);
3785
3786 return do_unlinkat(dfd, pathname);
3787}
3788
3789SYSCALL_DEFINE1(unlink, const char __user *, pathname)
3790{
3791 return do_unlinkat(AT_FDCWD, pathname);
3792}
3793
3794int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
3795{
3796 int error = may_create(dir, dentry);
3797
3798 if (error)
3799 return error;
3800
3801 if (!dir->i_op->symlink)
3802 return -EPERM;
3803
3804 error = security_inode_symlink(dir, dentry, oldname);
3805 if (error)
3806 return error;
3807
3808 error = dir->i_op->symlink(dir, dentry, oldname);
3809 if (!error)
3810 fsnotify_create(dir, dentry);
3811 return error;
3812}
3813EXPORT_SYMBOL(vfs_symlink);
3814
3815SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
3816 int, newdfd, const char __user *, newname)
3817{
3818 int error;
3819 struct filename *from;
3820 struct dentry *dentry;
3821 struct path path;
3822 unsigned int lookup_flags = 0;
3823
3824 from = getname(oldname);
3825 if (IS_ERR(from))
3826 return PTR_ERR(from);
3827retry:
3828 dentry = user_path_create(newdfd, newname, &path, lookup_flags);
3829 error = PTR_ERR(dentry);
3830 if (IS_ERR(dentry))
3831 goto out_putname;
3832
3833 error = security_path_symlink(&path, dentry, from->name);
3834 if (!error)
3835 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
3836 done_path_create(&path, dentry);
3837 if (retry_estale(error, lookup_flags)) {
3838 lookup_flags |= LOOKUP_REVAL;
3839 goto retry;
3840 }
3841out_putname:
3842 putname(from);
3843 return error;
3844}
3845
3846SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
3847{
3848 return sys_symlinkat(oldname, AT_FDCWD, newname);
3849}
3850
3851/**
3852 * vfs_link - create a new link
3853 * @old_dentry: object to be linked
3854 * @dir: new parent
3855 * @new_dentry: where to create the new link
3856 * @delegated_inode: returns inode needing a delegation break
3857 *
3858 * The caller must hold dir->i_mutex
3859 *
3860 * If vfs_link discovers a delegation on the to-be-linked file in need
3861 * of breaking, it will return -EWOULDBLOCK and return a reference to the
3862 * inode in delegated_inode. The caller should then break the delegation
3863 * and retry. Because breaking a delegation may take a long time, the
3864 * caller should drop the i_mutex before doing so.
3865 *
3866 * Alternatively, a caller may pass NULL for delegated_inode. This may
3867 * be appropriate for callers that expect the underlying filesystem not
3868 * to be NFS exported.
3869 */
3870int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
3871{
3872 struct inode *inode = old_dentry->d_inode;
3873 unsigned max_links = dir->i_sb->s_max_links;
3874 int error;
3875
3876 if (!inode)
3877 return -ENOENT;
3878
3879 error = may_create(dir, new_dentry);
3880 if (error)
3881 return error;
3882
3883 if (dir->i_sb != inode->i_sb)
3884 return -EXDEV;
3885
3886 /*
3887 * A link to an append-only or immutable file cannot be created.
3888 */
3889 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3890 return -EPERM;
3891 if (!dir->i_op->link)
3892 return -EPERM;
3893 if (S_ISDIR(inode->i_mode))
3894 return -EPERM;
3895
3896 error = security_inode_link(old_dentry, dir, new_dentry);
3897 if (error)
3898 return error;
3899
3900 mutex_lock(&inode->i_mutex);
3901 /* Make sure we don't allow creating a hardlink to an unlinked file */
3902 if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
3903 error = -ENOENT;
3904 else if (max_links && inode->i_nlink >= max_links)
3905 error = -EMLINK;
3906 else {
3907 error = try_break_deleg(inode, delegated_inode);
3908 if (!error)
3909 error = dir->i_op->link(old_dentry, dir, new_dentry);
3910 }
3911
3912 if (!error && (inode->i_state & I_LINKABLE)) {
3913 spin_lock(&inode->i_lock);
3914 inode->i_state &= ~I_LINKABLE;
3915 spin_unlock(&inode->i_lock);
3916 }
3917 mutex_unlock(&inode->i_mutex);
3918 if (!error)
3919 fsnotify_link(dir, inode, new_dentry);
3920 return error;
3921}
3922EXPORT_SYMBOL(vfs_link);
3923
3924/*
3925 * Hardlinks are often used in delicate situations. We avoid
3926 * security-related surprises by not following symlinks on the
3927 * newname. --KAB
3928 *
3929 * We don't follow them on the oldname either to be compatible
3930 * with linux 2.0, and to avoid hard-linking to directories
3931 * and other special files. --ADM
3932 */
3933SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
3934 int, newdfd, const char __user *, newname, int, flags)
3935{
3936 struct dentry *new_dentry;
3937 struct path old_path, new_path;
3938 struct inode *delegated_inode = NULL;
3939 int how = 0;
3940 int error;
3941
3942 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
3943 return -EINVAL;
3944 /*
3945 * To use null names we require CAP_DAC_READ_SEARCH.
3946 * This ensures that not everyone will be able to create
3947 * a hardlink using the passed file descriptor.
3948 */
3949 if (flags & AT_EMPTY_PATH) {
3950 if (!capable(CAP_DAC_READ_SEARCH))
3951 return -ENOENT;
3952 how = LOOKUP_EMPTY;
3953 }
3954
3955 if (flags & AT_SYMLINK_FOLLOW)
3956 how |= LOOKUP_FOLLOW;
3957retry:
3958 error = user_path_at(olddfd, oldname, how, &old_path);
3959 if (error)
3960 return error;
3961
3962 new_dentry = user_path_create(newdfd, newname, &new_path,
3963 (how & LOOKUP_REVAL));
3964 error = PTR_ERR(new_dentry);
3965 if (IS_ERR(new_dentry))
3966 goto out;
3967
3968 error = -EXDEV;
3969 if (old_path.mnt != new_path.mnt)
3970 goto out_dput;
3971 error = may_linkat(&old_path);
3972 if (unlikely(error))
3973 goto out_dput;
3974 error = security_path_link(old_path.dentry, &new_path, new_dentry);
3975 if (error)
3976 goto out_dput;
3977 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
3978out_dput:
3979 done_path_create(&new_path, new_dentry);
3980 if (delegated_inode) {
3981 error = break_deleg_wait(&delegated_inode);
3982 if (!error) {
3983 path_put(&old_path);
3984 goto retry;
3985 }
3986 }
3987 if (retry_estale(error, how)) {
3988 path_put(&old_path);
3989 how |= LOOKUP_REVAL;
3990 goto retry;
3991 }
3992out:
3993 path_put(&old_path);
3994
3995 return error;
3996}
3997
3998SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
3999{
4000 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
4001}
4002
4003/**
4004 * vfs_rename - rename a filesystem object
4005 * @old_dir: parent of source
4006 * @old_dentry: source
4007 * @new_dir: parent of destination
4008 * @new_dentry: destination
4009 * @delegated_inode: returns an inode needing a delegation break
4010 * @flags: rename flags
4011 *
4012 * The caller must hold multiple mutexes--see lock_rename().
4013 *
4014 * If vfs_rename discovers a delegation in need of breaking at either
4015 * the source or destination, it will return -EWOULDBLOCK and return a
4016 * reference to the inode in delegated_inode. The caller should then
4017 * break the delegation and retry. Because breaking a delegation may
4018 * take a long time, the caller should drop all locks before doing
4019 * so.
4020 *
4021 * Alternatively, a caller may pass NULL for delegated_inode. This may
4022 * be appropriate for callers that expect the underlying filesystem not
4023 * to be NFS exported.
4024 *
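 * A minimal caller-side sketch of that retry protocol (mirroring what
 * renameat2() below actually does; nothing here is new API):
 *
 *	struct inode *delegated_inode = NULL;
 *	...
 * retry_deleg:
 *	trap = lock_rename(new_dir, old_dir);
 *	...
 *	error = vfs_rename(old_dir->d_inode, old_dentry,
 *			   new_dir->d_inode, new_dentry,
 *			   &delegated_inode, flags);
 *	...
 *	unlock_rename(new_dir, old_dir);
 *	if (delegated_inode) {
 *		error = break_deleg_wait(&delegated_inode);
 *		if (!error)
 *			goto retry_deleg;
 *	}
 *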
4025 * The worst of all namespace operations - renaming a directory. "Perverted"
4026 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
4027 * Problems:
4028 * a) we can get into loop creation.
4029 * b) race potential - two innocent renames can create a loop together.
4030 * That's where 4.4BSD screws up. Current fix: serialization on
4031 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
4032 * story.
4033 * c) we have to lock _four_ objects - parents and victim (if it exists),
4034 * and source (if it is not a directory).
4035 * And that - after we got ->i_mutex on parents (until then we don't know
4036 * whether the target exists). Solution: try to be smart with locking
4037 * order for inodes. We rely on the fact that tree topology may change
4038 * only under ->s_vfs_rename_mutex _and_ that the parent of the object we
4039 * move will be locked. Thus we can rank directories by the tree
4040 * (ancestors first) and rank all non-directories after them.
4041 * That works since everybody except rename does "lock parent, lookup,
4042 * lock child" and rename is under ->s_vfs_rename_mutex.
4043 * HOWEVER, it relies on the assumption that any object with ->lookup()
4044 * has no more than 1 dentry. If "hybrid" objects ever appear,
4045 * we'd better make sure that there's no link(2) for them.
4046 * d) conversion from fhandle to dentry may come at the wrong moment - when
4047 * we are removing the target. Solution: we will have to grab ->i_mutex
4048 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
4049 * ->i_mutex on parents, which works but leads to some truly excessive
4050 * locking].
4051 */
4052int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4053 struct inode *new_dir, struct dentry *new_dentry,
4054 struct inode **delegated_inode, unsigned int flags)
4055{
4056 int error;
4057 bool is_dir = d_is_dir(old_dentry);
4058 const unsigned char *old_name;
4059 struct inode *source = old_dentry->d_inode;
4060 struct inode *target = new_dentry->d_inode;
4061 bool new_is_dir = false;
4062 unsigned max_links = new_dir->i_sb->s_max_links;
4063
4064 if (source == target)
4065 return 0;
4066
4067 error = may_delete(old_dir, old_dentry, is_dir);
4068 if (error)
4069 return error;
4070
4071 if (!target) {
4072 error = may_create(new_dir, new_dentry);
4073 } else {
4074 new_is_dir = d_is_dir(new_dentry);
4075
4076 if (!(flags & RENAME_EXCHANGE))
4077 error = may_delete(new_dir, new_dentry, is_dir);
4078 else
4079 error = may_delete(new_dir, new_dentry, new_is_dir);
4080 }
4081 if (error)
4082 return error;
4083
4084 if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
4085 return -EPERM;
4086
4087 if (flags && !old_dir->i_op->rename2)
4088 return -EINVAL;
4089
4090	/*
4091	 * If we are going to change the parent - check write permissions;
4092	 * we'll need to update the '..' entry of the object being moved.
4093	 */
4094 if (new_dir != old_dir) {
4095 if (is_dir) {
4096 error = inode_permission(source, MAY_WRITE);
4097 if (error)
4098 return error;
4099 }
4100 if ((flags & RENAME_EXCHANGE) && new_is_dir) {
4101 error = inode_permission(target, MAY_WRITE);
4102 if (error)
4103 return error;
4104 }
4105 }
4106
4107 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
4108 flags);
4109 if (error)
4110 return error;
4111
4112 old_name = fsnotify_oldname_init(old_dentry->d_name.name);
4113 dget(new_dentry);
4114 if (!is_dir || (flags & RENAME_EXCHANGE))
4115 lock_two_nondirectories(source, target);
4116 else if (target)
4117 mutex_lock(&target->i_mutex);
4118
4119 error = -EBUSY;
4120 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
4121 goto out;
4122
4123 if (max_links && new_dir != old_dir) {
4124 error = -EMLINK;
4125 if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
4126 goto out;
4127 if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
4128 old_dir->i_nlink >= max_links)
4129 goto out;
4130 }
4131 if (is_dir && !(flags & RENAME_EXCHANGE) && target)
4132 shrink_dcache_parent(new_dentry);
4133 if (!is_dir) {
4134 error = try_break_deleg(source, delegated_inode);
4135 if (error)
4136 goto out;
4137 }
4138 if (target && !new_is_dir) {
4139 error = try_break_deleg(target, delegated_inode);
4140 if (error)
4141 goto out;
4142 }
4143 if (!old_dir->i_op->rename2) {
4144 error = old_dir->i_op->rename(old_dir, old_dentry,
4145 new_dir, new_dentry);
4146 } else {
4147 WARN_ON(old_dir->i_op->rename != NULL);
4148 error = old_dir->i_op->rename2(old_dir, old_dentry,
4149 new_dir, new_dentry, flags);
4150 }
4151 if (error)
4152 goto out;
4153
4154 if (!(flags & RENAME_EXCHANGE) && target) {
4155 if (is_dir)
4156 target->i_flags |= S_DEAD;
4157 dont_mount(new_dentry);
4158 }
4159 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
4160 if (!(flags & RENAME_EXCHANGE))
4161 d_move(old_dentry, new_dentry);
4162 else
4163 d_exchange(old_dentry, new_dentry);
4164 }
4165out:
4166 if (!is_dir || (flags & RENAME_EXCHANGE))
4167 unlock_two_nondirectories(source, target);
4168 else if (target)
4169 mutex_unlock(&target->i_mutex);
4170 dput(new_dentry);
4171 if (!error) {
4172 fsnotify_move(old_dir, new_dir, old_name, is_dir,
4173 !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
4174 if (flags & RENAME_EXCHANGE) {
4175 fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
4176 new_is_dir, NULL, new_dentry);
4177 }
4178 }
4179 fsnotify_oldname_free(old_name);
4180
4181 return error;
4182}
4183EXPORT_SYMBOL(vfs_rename);

4184
4185SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
4186 int, newdfd, const char __user *, newname, unsigned int, flags)
4187{
4188 struct dentry *old_dir, *new_dir;
4189 struct dentry *old_dentry, *new_dentry;
4190 struct dentry *trap;
4191 struct nameidata oldnd, newnd;
4192 struct inode *delegated_inode = NULL;
4193 struct filename *from;
4194 struct filename *to;
4195 unsigned int lookup_flags = 0;
4196 bool should_retry = false;
4197 int error;
4198
4199 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
4200 return -EINVAL;
4201
4202 if ((flags & RENAME_NOREPLACE) && (flags & RENAME_EXCHANGE))
4203 return -EINVAL;
4204
4205retry:
4206 from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);
4207 if (IS_ERR(from)) {
4208 error = PTR_ERR(from);
4209 goto exit;
4210 }
4211
4212 to = user_path_parent(newdfd, newname, &newnd, lookup_flags);
4213 if (IS_ERR(to)) {
4214 error = PTR_ERR(to);
4215 goto exit1;
4216 }
4217
4218 error = -EXDEV;
4219 if (oldnd.path.mnt != newnd.path.mnt)
4220 goto exit2;
4221
4222 old_dir = oldnd.path.dentry;
4223 error = -EBUSY;
4224 if (oldnd.last_type != LAST_NORM)
4225 goto exit2;
4226
4227 new_dir = newnd.path.dentry;
4228 if (flags & RENAME_NOREPLACE)
4229 error = -EEXIST;
4230 if (newnd.last_type != LAST_NORM)
4231 goto exit2;
4232
4233 error = mnt_want_write(oldnd.path.mnt);
4234 if (error)
4235 goto exit2;
4236
4237 oldnd.flags &= ~LOOKUP_PARENT;
4238 newnd.flags &= ~LOOKUP_PARENT;
4239 if (!(flags & RENAME_EXCHANGE))
4240 newnd.flags |= LOOKUP_RENAME_TARGET;
4241
4242retry_deleg:
4243 trap = lock_rename(new_dir, old_dir);
4244
4245 old_dentry = lookup_hash(&oldnd);
4246 error = PTR_ERR(old_dentry);
4247 if (IS_ERR(old_dentry))
4248 goto exit3;
4249 /* source must exist */
4250 error = -ENOENT;
4251 if (d_is_negative(old_dentry))
4252 goto exit4;
4253 new_dentry = lookup_hash(&newnd);
4254 error = PTR_ERR(new_dentry);
4255 if (IS_ERR(new_dentry))
4256 goto exit4;
4257 error = -EEXIST;
4258 if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
4259 goto exit5;
4260 if (flags & RENAME_EXCHANGE) {
4261 error = -ENOENT;
4262 if (d_is_negative(new_dentry))
4263 goto exit5;
4264
4265 if (!d_is_dir(new_dentry)) {
4266 error = -ENOTDIR;
4267 if (newnd.last.name[newnd.last.len])
4268 goto exit5;
4269 }
4270 }
4271	/* unless the source is a directory, trailing slashes give -ENOTDIR */
4272 if (!d_is_dir(old_dentry)) {
4273 error = -ENOTDIR;
4274 if (oldnd.last.name[oldnd.last.len])
4275 goto exit5;
4276 if (!(flags & RENAME_EXCHANGE) && newnd.last.name[newnd.last.len])
4277 goto exit5;
4278 }
4279	/* source should not be an ancestor of the target */
4280 error = -EINVAL;
4281 if (old_dentry == trap)
4282 goto exit5;
4283 /* target should not be an ancestor of source */
4284 if (!(flags & RENAME_EXCHANGE))
4285 error = -ENOTEMPTY;
4286 if (new_dentry == trap)
4287 goto exit5;
4288
4289 error = security_path_rename(&oldnd.path, old_dentry,
4290 &newnd.path, new_dentry, flags);
4291 if (error)
4292 goto exit5;
4293 error = vfs_rename(old_dir->d_inode, old_dentry,
4294 new_dir->d_inode, new_dentry,
4295 &delegated_inode, flags);
4296exit5:
4297 dput(new_dentry);
4298exit4:
4299 dput(old_dentry);
4300exit3:
4301 unlock_rename(new_dir, old_dir);
4302 if (delegated_inode) {
4303 error = break_deleg_wait(&delegated_inode);
4304 if (!error)
4305 goto retry_deleg;
4306 }
4307 mnt_drop_write(oldnd.path.mnt);
4308exit2:
4309 if (retry_estale(error, lookup_flags))
4310 should_retry = true;
4311 path_put(&newnd.path);
4312 putname(to);
4313exit1:
4314 path_put(&oldnd.path);
4315 putname(from);
4316 if (should_retry) {
4317 should_retry = false;
4318 lookup_flags |= LOOKUP_REVAL;
4319 goto retry;
4320 }
4321exit:
4322 return error;
4323}
4324
4325SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
4326 int, newdfd, const char __user *, newname)
4327{
4328 return sys_renameat2(olddfd, oldname, newdfd, newname, 0);
4329}
4330
4331SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
4332{
4333 return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
4334}
4335
4336int readlink_copy(char __user *buffer, int buflen, const char *link)
4337{
4338 int len = PTR_ERR(link);
4339 if (IS_ERR(link))
4340 goto out;
4341
4342 len = strlen(link);
4343 if (len > (unsigned) buflen)
4344 len = buflen;
4345 if (copy_to_user(buffer, link, len))
4346 len = -EFAULT;
4347out:
4348 return len;
4349}
4350EXPORT_SYMBOL(readlink_copy);
4351
4352/*
4353 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
4354 * have ->follow_link() touching nd only in nd_set_link(). Using (or not
4355 * using) it for any given inode is up to the filesystem.
4356 */
4357int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4358{
4359 struct nameidata nd;
4360 void *cookie;
4361 int res;
4362
4363 nd.depth = 0;
4364 cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
4365 if (IS_ERR(cookie))
4366 return PTR_ERR(cookie);
4367
4368 res = readlink_copy(buffer, buflen, nd_get_link(&nd));
4369 if (dentry->d_inode->i_op->put_link)
4370 dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
4371 return res;
4372}
4373EXPORT_SYMBOL(generic_readlink);
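/*
 * A minimal sketch of the kind of ->follow_link() this pairs with: a
 * hypothetical "foofs" fast symlink whose target lives in the in-core
 * inode (FOOFS_I() and i_link_target are made-up names).  Only
 * nd_set_link() touches nd, so generic_readlink() above is safe to use:
 *
 *	static void *foofs_follow_link(struct dentry *dentry, struct nameidata *nd)
 *	{
 *		nd_set_link(nd, FOOFS_I(dentry->d_inode)->i_link_target);
 *		return NULL;
 *	}
 *
 *	static const struct inode_operations foofs_fast_symlink_iops = {
 *		.readlink	= generic_readlink,
 *		.follow_link	= foofs_follow_link,
 *	};
 */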
4374
4375/* get the link contents into pagecache */
4376static char *page_getlink(struct dentry * dentry, struct page **ppage)
4377{
4378 char *kaddr;
4379 struct page *page;
4380 struct address_space *mapping = dentry->d_inode->i_mapping;
4381 page = read_mapping_page(mapping, 0, NULL);
4382 if (IS_ERR(page))
4383 return (char*)page;
4384 *ppage = page;
4385 kaddr = kmap(page);
4386 nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
4387 return kaddr;
4388}
4389
4390int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4391{
4392 struct page *page = NULL;
4393 int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page));
4394 if (page) {
4395 kunmap(page);
4396 page_cache_release(page);
4397 }
4398 return res;
4399}
4400EXPORT_SYMBOL(page_readlink);
4401
4402void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
4403{
4404 struct page *page = NULL;
4405 nd_set_link(nd, page_getlink(dentry, &page));
4406 return page;
4407}
4408EXPORT_SYMBOL(page_follow_link_light);
4409
4410void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
4411{
4412 struct page *page = cookie;
4413
4414 if (page) {
4415 kunmap(page);
4416 page_cache_release(page);
4417 }
4418}
4419EXPORT_SYMBOL(page_put_link);
4420
4421/*
4422 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
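 * (i.e. allocate pagecache pages without __GFP_FS).  A caller would set
 * nofs when it already holds filesystem locks or an open transaction, so
 * that memory reclaim cannot recurse back into the filesystem.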
4423 */
4424int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
4425{
4426 struct address_space *mapping = inode->i_mapping;
4427 struct page *page;
4428 void *fsdata;
4429 int err;
4430 char *kaddr;
4431 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
4432 if (nofs)
4433 flags |= AOP_FLAG_NOFS;
4434
4435retry:
4436 err = pagecache_write_begin(NULL, mapping, 0, len-1,
4437 flags, &page, &fsdata);
4438 if (err)
4439 goto fail;
4440
4441 kaddr = kmap_atomic(page);
4442 memcpy(kaddr, symname, len-1);
4443 kunmap_atomic(kaddr);
4444
4445 err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
4446 page, fsdata);
4447 if (err < 0)
4448 goto fail;
4449 if (err < len-1)
4450 goto retry;
4451
4452 mark_inode_dirty(inode);
4453 return 0;
4454fail:
4455 return err;
4456}
4457EXPORT_SYMBOL(__page_symlink);
4458
4459int page_symlink(struct inode *inode, const char *symname, int len)
4460{
4461 return __page_symlink(inode, symname, len,
4462 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
4463}
4464EXPORT_SYMBOL(page_symlink);
4465
4466const struct inode_operations page_symlink_inode_operations = {
4467 .readlink = generic_readlink,
4468 .follow_link = page_follow_link_light,
4469 .put_link = page_put_link,
4470};
4471EXPORT_SYMBOL(page_symlink_inode_operations);
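/*
 * A hedged sketch of how a filesystem's ->symlink() method typically wires
 * the helpers above together.  "foofs", foofs_new_inode() and foofs_aops
 * are hypothetical; the shape follows what simple page-backed filesystems
 * do:
 *
 *	static int foofs_symlink(struct inode *dir, struct dentry *dentry,
 *				 const char *symname)
 *	{
 *		struct inode *inode = foofs_new_inode(dir, S_IFLNK | S_IRWXUGO);
 *		int err;
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *
 *		inode->i_op = &page_symlink_inode_operations;
 *		inode->i_mapping->a_ops = &foofs_aops;
 *		err = page_symlink(inode, symname, strlen(symname) + 1);
 *		if (err) {
 *			iput(inode);
 *			return err;
 *		}
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */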