/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 */
7 #include <linux/export.h>
9 #include <linux/errno.h>
10 #include <linux/file.h>
11 #include <linux/highuid.h>
13 #include <linux/namei.h>
14 #include <linux/security.h>
15 #include <linux/cred.h>
16 #include <linux/syscalls.h>
17 #include <linux/pagemap.h>
19 #include <linux/uaccess.h>
20 #include <asm/unistd.h>
22 void generic_fillattr(struct inode *inode, struct kstat *stat)
24 stat->dev = inode->i_sb->s_dev;
25 stat->ino = inode->i_ino;
26 stat->mode = inode->i_mode;
27 stat->nlink = inode->i_nlink;
28 stat->uid = inode->i_uid;
29 stat->gid = inode->i_gid;
30 stat->rdev = inode->i_rdev;
31 stat->size = i_size_read(inode);
32 stat->atime = inode->i_atime;
33 stat->mtime = inode->i_mtime;
34 stat->ctime = inode->i_ctime;
35 stat->blksize = i_blocksize(inode);
36 stat->blocks = inode->i_blocks;
39 EXPORT_SYMBOL(generic_fillattr);
42 * vfs_getattr_nosec - getattr without security checks
43 * @path: file to get attributes from
44 * @stat: structure to return attributes in
46 * Get attributes without calling security_inode_getattr.
48 * Currently the only caller other than vfs_getattr is internal to the
49 * filehandle lookup code, which uses only the inode number and returns
50 * no attributes to any user. Any other code probably wants
53 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
55 struct inode *inode = d_backing_inode(path->dentry);
57 if (inode->i_op->getattr)
58 return inode->i_op->getattr(path->mnt, path->dentry, stat);
60 generic_fillattr(inode, stat);
64 EXPORT_SYMBOL(vfs_getattr_nosec);
/*
 * vfs_getattr - the security-checked front end for getattr.
 * Consults the LSM hook first; on denial the attributes are never read.
 */
int vfs_getattr(struct path *path, struct kstat *stat)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat);
}
EXPORT_SYMBOL(vfs_getattr);
78 int vfs_fstat(unsigned int fd, struct kstat *stat)
80 struct fd f = fdget_raw(fd);
84 error = vfs_getattr(&f.file->f_path, stat);
89 EXPORT_SYMBOL(vfs_fstat);
91 int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
96 unsigned int lookup_flags = 0;
98 if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
102 if (!(flag & AT_SYMLINK_NOFOLLOW))
103 lookup_flags |= LOOKUP_FOLLOW;
104 if (flag & AT_EMPTY_PATH)
105 lookup_flags |= LOOKUP_EMPTY;
107 error = user_path_at(dfd, filename, lookup_flags, &path);
111 error = vfs_getattr(&path, stat);
113 if (retry_estale(error, lookup_flags)) {
114 lookup_flags |= LOOKUP_REVAL;
120 EXPORT_SYMBOL(vfs_fstatat);
122 int vfs_stat(const char __user *name, struct kstat *stat)
124 return vfs_fstatat(AT_FDCWD, name, stat, 0);
126 EXPORT_SYMBOL(vfs_stat);
128 int vfs_lstat(const char __user *name, struct kstat *stat)
130 return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
132 EXPORT_SYMBOL(vfs_lstat);
135 #ifdef __ARCH_WANT_OLD_STAT
/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
141 static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
143 static int warncount = 5;
144 struct __old_kernel_stat tmp;
148 printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
150 } else if (warncount < 0) {
151 /* it's laughable, but... */
155 memset(&tmp, 0, sizeof(struct __old_kernel_stat));
156 tmp.st_dev = old_encode_dev(stat->dev);
157 tmp.st_ino = stat->ino;
158 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
160 tmp.st_mode = stat->mode;
161 tmp.st_nlink = stat->nlink;
162 if (tmp.st_nlink != stat->nlink)
164 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
165 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
166 tmp.st_rdev = old_encode_dev(stat->rdev);
167 #if BITS_PER_LONG == 32
168 if (stat->size > MAX_NON_LFS)
171 tmp.st_size = stat->size;
172 tmp.st_atime = stat->atime.tv_sec;
173 tmp.st_mtime = stat->mtime.tv_sec;
174 tmp.st_ctime = stat->ctime.tv_sec;
175 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
178 SYSCALL_DEFINE2(stat, const char __user *, filename,
179 struct __old_kernel_stat __user *, statbuf)
184 error = vfs_stat(filename, &stat);
188 return cp_old_stat(&stat, statbuf);
191 SYSCALL_DEFINE2(lstat, const char __user *, filename,
192 struct __old_kernel_stat __user *, statbuf)
197 error = vfs_lstat(filename, &stat);
201 return cp_old_stat(&stat, statbuf);
204 SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
207 int error = vfs_fstat(fd, &stat);
210 error = cp_old_stat(&stat, statbuf);
215 #endif /* __ARCH_WANT_OLD_STAT */
/*
 * choose_32_64(a, b) selects the 32-bit or 64-bit flavor at preprocessing
 * time, so valid_dev()/encode_dev() pick the old (16-bit) dev_t encoding
 * on 32-bit kernels and the new encoding on 64-bit ones.  The mangled
 * original had lost the #else/#endif arms.
 */
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
230 static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
234 if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
236 #if BITS_PER_LONG == 32
237 if (stat->size > MAX_NON_LFS)
241 INIT_STRUCT_STAT_PADDING(tmp);
242 tmp.st_dev = encode_dev(stat->dev);
243 tmp.st_ino = stat->ino;
244 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
246 tmp.st_mode = stat->mode;
247 tmp.st_nlink = stat->nlink;
248 if (tmp.st_nlink != stat->nlink)
250 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
251 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
252 tmp.st_rdev = encode_dev(stat->rdev);
253 tmp.st_size = stat->size;
254 tmp.st_atime = stat->atime.tv_sec;
255 tmp.st_mtime = stat->mtime.tv_sec;
256 tmp.st_ctime = stat->ctime.tv_sec;
257 #ifdef STAT_HAVE_NSEC
258 tmp.st_atime_nsec = stat->atime.tv_nsec;
259 tmp.st_mtime_nsec = stat->mtime.tv_nsec;
260 tmp.st_ctime_nsec = stat->ctime.tv_nsec;
262 tmp.st_blocks = stat->blocks;
263 tmp.st_blksize = stat->blksize;
264 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
267 SYSCALL_DEFINE2(newstat, const char __user *, filename,
268 struct stat __user *, statbuf)
271 int error = vfs_stat(filename, &stat);
275 return cp_new_stat(&stat, statbuf);
278 SYSCALL_DEFINE2(newlstat, const char __user *, filename,
279 struct stat __user *, statbuf)
284 error = vfs_lstat(filename, &stat);
288 return cp_new_stat(&stat, statbuf);
291 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
292 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
293 struct stat __user *, statbuf, int, flag)
298 error = vfs_fstatat(dfd, filename, &stat, flag);
301 return cp_new_stat(&stat, statbuf);
305 SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
308 int error = vfs_fstat(fd, &stat);
311 error = cp_new_stat(&stat, statbuf);
316 SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
317 char __user *, buf, int, bufsiz)
322 unsigned int lookup_flags = LOOKUP_EMPTY;
328 error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
330 struct inode *inode = d_backing_inode(path.dentry);
332 error = empty ? -ENOENT : -EINVAL;
334 * AFS mountpoints allow readlink(2) but are not symlinks
336 if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
337 error = security_inode_readlink(path.dentry);
340 error = vfs_readlink(path.dentry, buf, bufsiz);
344 if (retry_estale(error, lookup_flags)) {
345 lookup_flags |= LOOKUP_REVAL;
352 SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
355 return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
359 /* ---------- LFS-64 ----------- */
360 #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
/* Default stat64 padding-zeroing; arches may override with a cheaper one. */
#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif
366 static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
370 INIT_STRUCT_STAT64_PADDING(tmp);
372 /* mips has weird padding, so we don't get 64 bits there */
373 tmp.st_dev = new_encode_dev(stat->dev);
374 tmp.st_rdev = new_encode_dev(stat->rdev);
376 tmp.st_dev = huge_encode_dev(stat->dev);
377 tmp.st_rdev = huge_encode_dev(stat->rdev);
379 tmp.st_ino = stat->ino;
380 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
382 #ifdef STAT64_HAS_BROKEN_ST_INO
383 tmp.__st_ino = stat->ino;
385 tmp.st_mode = stat->mode;
386 tmp.st_nlink = stat->nlink;
387 tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
388 tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
389 tmp.st_atime = stat->atime.tv_sec;
390 tmp.st_atime_nsec = stat->atime.tv_nsec;
391 tmp.st_mtime = stat->mtime.tv_sec;
392 tmp.st_mtime_nsec = stat->mtime.tv_nsec;
393 tmp.st_ctime = stat->ctime.tv_sec;
394 tmp.st_ctime_nsec = stat->ctime.tv_nsec;
395 tmp.st_size = stat->size;
396 tmp.st_blocks = stat->blocks;
397 tmp.st_blksize = stat->blksize;
398 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
401 SYSCALL_DEFINE2(stat64, const char __user *, filename,
402 struct stat64 __user *, statbuf)
405 int error = vfs_stat(filename, &stat);
408 error = cp_new_stat64(&stat, statbuf);
413 SYSCALL_DEFINE2(lstat64, const char __user *, filename,
414 struct stat64 __user *, statbuf)
417 int error = vfs_lstat(filename, &stat);
420 error = cp_new_stat64(&stat, statbuf);
425 SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
428 int error = vfs_fstat(fd, &stat);
431 error = cp_new_stat64(&stat, statbuf);
436 SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
437 struct stat64 __user *, statbuf, int, flag)
442 error = vfs_fstatat(dfd, filename, &stat, flag);
445 return cp_new_stat64(&stat, statbuf);
447 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
449 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
450 void __inode_add_bytes(struct inode *inode, loff_t bytes)
452 inode->i_blocks += bytes >> 9;
454 inode->i_bytes += bytes;
455 if (inode->i_bytes >= 512) {
457 inode->i_bytes -= 512;
461 void inode_add_bytes(struct inode *inode, loff_t bytes)
463 spin_lock(&inode->i_lock);
464 __inode_add_bytes(inode, bytes);
465 spin_unlock(&inode->i_lock);
468 EXPORT_SYMBOL(inode_add_bytes);
470 void __inode_sub_bytes(struct inode *inode, loff_t bytes)
472 inode->i_blocks -= bytes >> 9;
474 if (inode->i_bytes < bytes) {
476 inode->i_bytes += 512;
478 inode->i_bytes -= bytes;
481 EXPORT_SYMBOL(__inode_sub_bytes);
483 void inode_sub_bytes(struct inode *inode, loff_t bytes)
485 spin_lock(&inode->i_lock);
486 __inode_sub_bytes(inode, bytes);
487 spin_unlock(&inode->i_lock);
490 EXPORT_SYMBOL(inode_sub_bytes);
492 loff_t inode_get_bytes(struct inode *inode)
496 spin_lock(&inode->i_lock);
497 ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
498 spin_unlock(&inode->i_lock);
502 EXPORT_SYMBOL(inode_get_bytes);
504 void inode_set_bytes(struct inode *inode, loff_t bytes)
506 /* Caller is here responsible for sufficient locking
507 * (ie. inode->i_lock) */
508 inode->i_blocks = bytes >> 9;
509 inode->i_bytes = bytes & 511;
512 EXPORT_SYMBOL(inode_set_bytes);