ovl: constant d_ino across copy up
[linux-block.git] fs/overlayfs/readdir.c
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

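/*
 * Entries of a merged directory are cached on the overlay dentry.
 * While the layers are merged, each entry is kept in an rbtree keyed
 * by name (to drop duplicates between layers) and on a linked list
 * that gives readdir a stable order and stable offsets.  The finished
 * cache is refcounted, shared by all struct files open on the
 * directory, and rebuilt when the directory's version number changes.
 */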
struct ovl_cache_entry {
        unsigned int len;
        unsigned int type;
        u64 real_ino;
        u64 ino;
        struct list_head l_node;
        struct rb_node node;
        struct ovl_cache_entry *next_maybe_whiteout;
        bool is_whiteout;
        char name[];
};

struct ovl_dir_cache {
        long refcount;
        u64 version;
        struct list_head entries;
};

struct ovl_readdir_data {
        struct dir_context ctx;
        struct dentry *dentry;
        bool is_lowest;
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
        struct ovl_cache_entry *first_maybe_whiteout;
        int count;
        int err;
        bool is_upper;
        bool d_type_supported;
};

struct ovl_dir_file {
        bool is_real;
        bool is_upper;
        struct ovl_dir_cache *cache;
        struct list_head *cursor;
        struct file *realfile;
        struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
        return container_of(n, struct ovl_cache_entry, node);
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
                                                    const char *name, int len)
{
        struct rb_node *node = root->rb_node;
        int cmp;

        while (node) {
                struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

                cmp = strncmp(name, p->name, len);
                if (cmp > 0)
                        node = p->node.rb_right;
                else if (cmp < 0 || len < p->len)
                        node = p->node.rb_left;
                else
                        return p;
        }

        return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
                           struct ovl_cache_entry *p)
{
        /* Don't care if not doing ovl_iter() */
        if (!rdd->dentry)
                return false;

        /* Always recalc d_ino for parent */
        if (strcmp(p->name, "..") == 0)
                return true;

        /* If this is lower, then native d_ino will do */
        if (!rdd->is_upper)
                return false;

        /*
         * Recalc d_ino for '.' and for all entries if dir is impure (contains
         * copied up entries)
         */
        if ((p->name[0] == '.' && p->len == 1) ||
            ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
                return true;

        return false;
}

static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
                                                   const char *name, int len,
                                                   u64 ino, unsigned int d_type)
{
        struct ovl_cache_entry *p;
        size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

        p = kmalloc(size, GFP_KERNEL);
        if (!p)
                return NULL;

        memcpy(p->name, name, len);
        p->name[len] = '\0';
        p->len = len;
        p->type = d_type;
        p->real_ino = ino;
        p->ino = ino;
        /* Defer setting d_ino for upper entry to ovl_iterate() */
        if (ovl_calc_d_ino(rdd, p))
                p->ino = 0;
        p->is_whiteout = false;

        if (d_type == DT_CHR) {
                p->next_maybe_whiteout = rdd->first_maybe_whiteout;
                rdd->first_maybe_whiteout = p;
        }
        return p;
}

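/*
 * Layers are read from the top down, so if a name is already present
 * in the rbtree the newly read entry is simply ignored: the entry from
 * the higher layer wins.
 */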
static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                                  const char *name, int len, u64 ino,
                                  unsigned int d_type)
{
        struct rb_node **newp = &rdd->root.rb_node;
        struct rb_node *parent = NULL;
        struct ovl_cache_entry *p;

        while (*newp) {
                int cmp;
                struct ovl_cache_entry *tmp;

                parent = *newp;
                tmp = ovl_cache_entry_from_node(*newp);
                cmp = strncmp(name, tmp->name, len);
                if (cmp > 0)
                        newp = &tmp->node.rb_right;
                else if (cmp < 0 || len < tmp->len)
                        newp = &tmp->node.rb_left;
                else
                        return 0;
        }

        p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
        if (p == NULL) {
                rdd->err = -ENOMEM;
                return -ENOMEM;
        }

        list_add_tail(&p->l_node, rdd->list);
        rb_link_node(&p->node, parent, newp);
        rb_insert_color(&p->node, &rdd->root);

        return 0;
}

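/*
 * For the lowest layer, entries are collected on the temporary "middle"
 * list: names already present in the rbtree are moved there and new
 * names are added there.  The caller splices "middle" in at the head of
 * the merged list, so lowest layer entries come first and their offsets
 * stay reasonably constant.
 */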
static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
                           const char *name, int namelen,
                           loff_t offset, u64 ino, unsigned int d_type)
{
        struct ovl_cache_entry *p;

        p = ovl_cache_entry_find(&rdd->root, name, namelen);
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
                p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
                        list_add_tail(&p->l_node, &rdd->middle);
        }

        return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
        struct ovl_cache_entry *p;
        struct ovl_cache_entry *n;

        list_for_each_entry_safe(p, n, list, l_node)
                kfree(p);

        INIT_LIST_HEAD(list);
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
        struct ovl_dir_cache *cache = od->cache;

        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
                if (ovl_dir_cache(dentry) == cache)
                        ovl_set_dir_cache(dentry, NULL);

                ovl_cache_free(&cache->entries);
                kfree(cache);
        }
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                          int namelen, loff_t offset, u64 ino,
                          unsigned int d_type)
{
        struct ovl_readdir_data *rdd =
                container_of(ctx, struct ovl_readdir_data, ctx);

        rdd->count++;
        if (!rdd->is_lowest)
                return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
        else
                return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

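/*
 * Whiteouts are character devices, so any DT_CHR entry seen during
 * iteration is remembered on the first_maybe_whiteout list and looked
 * up here afterwards, with the directory locked and the mounter's
 * credentials, to see whether it really is a whiteout.
 */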
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
        int err;
        struct ovl_cache_entry *p;
        struct dentry *dentry;
        const struct cred *old_cred;

        old_cred = ovl_override_creds(rdd->dentry->d_sb);

        err = down_write_killable(&dir->d_inode->i_rwsem);
        if (!err) {
                while (rdd->first_maybe_whiteout) {
                        p = rdd->first_maybe_whiteout;
                        rdd->first_maybe_whiteout = p->next_maybe_whiteout;
                        dentry = lookup_one_len(p->name, dir, p->len);
                        if (!IS_ERR(dentry)) {
                                p->is_whiteout = ovl_is_whiteout(dentry);
                                dput(dentry);
                        }
                }
                inode_unlock(dir->d_inode);
        }
        revert_creds(old_cred);

        return err;
}

static inline int ovl_dir_read(struct path *realpath,
                               struct ovl_readdir_data *rdd)
{
        struct file *realfile;
        int err;

        realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);

        rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
                rdd->err = 0;
                err = iterate_dir(realfile, &rdd->ctx);
                if (err >= 0)
                        err = rdd->err;
        } while (!err && rdd->count);

        if (!err && rdd->first_maybe_whiteout && rdd->dentry)
                err = ovl_check_whiteouts(realpath->dentry, rdd);

        fput(realfile);

        return err;
}

static void ovl_dir_reset(struct file *file)
{
        struct ovl_dir_file *od = file->private_data;
        struct ovl_dir_cache *cache = od->cache;
        struct dentry *dentry = file->f_path.dentry;
        enum ovl_path_type type = ovl_path_type(dentry);

        if (cache && ovl_dentry_version_get(dentry) != cache->version) {
                ovl_cache_put(od, dentry);
                od->cache = NULL;
                od->cursor = NULL;
        }
        WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
        if (od->is_real && OVL_TYPE_MERGE(type))
                od->is_real = false;
}

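/*
 * Read all layers of the directory, upper layer first.  The same
 * ovl_readdir_data is reused for every layer; is_upper is set while the
 * overlay's own upper directory is being read so that ovl_calc_d_ino()
 * can decide which entries need their d_ino recalculated.
 */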
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
{
        int err;
        struct path realpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .dentry = dentry,
                .list = list,
                .root = RB_ROOT,
                .is_lowest = false,
        };
        int idx, next;

        for (idx = 0; idx != -1; idx = next) {
                next = ovl_path_next(idx, dentry, &realpath);
                rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

                if (next != -1) {
                        err = ovl_dir_read(&realpath, &rdd);
                        if (err)
                                break;
                } else {
                        /*
                         * Insert lowest layer entries before upper ones, this
                         * allows offsets to be reasonably constant
                         */
                        list_add(&rdd.middle, rdd.list);
                        rdd.is_lowest = true;
                        err = ovl_dir_read(&realpath, &rdd);
                        list_del(&rdd.middle);
                }
        }
        return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
        struct list_head *p;
        loff_t off = 0;

        list_for_each(p, &od->cache->entries) {
                if (off >= pos)
                        break;
                off++;
        }
        /* Cursor is safe since the cache is stable */
        od->cursor = p;
}

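/*
 * The cached entry list is only reused while the version stored in the
 * cache matches the overlay dentry's current version; otherwise the
 * cache is dropped and rebuilt from the layers.
 */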
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
        int res;
        struct ovl_dir_cache *cache;

        cache = ovl_dir_cache(dentry);
        if (cache && ovl_dentry_version_get(dentry) == cache->version) {
                cache->refcount++;
                return cache;
        }
        ovl_set_dir_cache(dentry, NULL);

        cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
        if (!cache)
                return ERR_PTR(-ENOMEM);

        cache->refcount = 1;
        INIT_LIST_HEAD(&cache->entries);

        res = ovl_dir_read_merged(dentry, &cache->entries);
        if (res) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
                return ERR_PTR(res);
        }

        cache->version = ovl_dentry_version_get(dentry);
        ovl_set_dir_cache(dentry, cache);

        return cache;
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for
 * upper entries.
 *
 * When all layers are on the same fs, and the upper entry has a
 * reference to its copy up origin, call vfs_getattr() on the overlay
 * entry to make sure that d_ino will be consistent with st_ino from
 * stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
        struct dentry *dir = path->dentry;
        struct dentry *this = NULL;
        enum ovl_path_type type;
        u64 ino = p->real_ino;
        int err = 0;

        if (!ovl_same_sb(dir->d_sb))
                goto out;

        if (p->name[0] == '.') {
                if (p->len == 1) {
                        this = dget(dir);
                        goto get;
                }
                if (p->len == 2 && p->name[1] == '.') {
                        /* we shall not be moved */
                        this = dget(dir->d_parent);
                        goto get;
                }
        }
        this = lookup_one_len(p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        this = NULL;
                        goto fail;
                }
                goto out;
        }

get:
        type = ovl_path_type(this);
        if (OVL_TYPE_ORIGIN(type)) {
                struct kstat stat;
                struct path statpath = *path;

                statpath.dentry = this;
                err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
                if (err)
                        goto fail;

                WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
                ino = stat.ino;
        }

out:
        p->ino = ino;
        dput(this);
        return err;

fail:
        pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
                            p->name, err);
        goto out;
}

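/*
 * Entries whose d_ino was deferred by ovl_cache_entry_new() (ino == 0)
 * get it filled in by ovl_cache_update_ino() just before they are
 * emitted, which keeps d_ino consistent with st_ino across copy up.
 */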
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
        struct ovl_cache_entry *p;
        int err;

        if (!ctx->pos)
                ovl_dir_reset(file);

        if (od->is_real)
                return iterate_dir(od->realfile, ctx);

        if (!od->cache) {
                struct ovl_dir_cache *cache;

                cache = ovl_cache_get(dentry);
                if (IS_ERR(cache))
                        return PTR_ERR(cache);

                od->cache = cache;
                ovl_seek_cursor(od, ctx->pos);
        }

        while (od->cursor != &od->cache->entries) {
                p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
                if (!p->is_whiteout) {
                        if (!p->ino) {
                                err = ovl_cache_update_ino(&file->f_path, p);
                                if (err)
                                        return err;
                        }
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
                }
                od->cursor = p->l_node.next;
                ctx->pos++;
        }
        return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
        loff_t res;
        struct ovl_dir_file *od = file->private_data;

        inode_lock(file_inode(file));
        if (!file->f_pos)
                ovl_dir_reset(file);

        if (od->is_real) {
                res = vfs_llseek(od->realfile, offset, origin);
                file->f_pos = od->realfile->f_pos;
        } else {
                res = -EINVAL;

                switch (origin) {
                case SEEK_CUR:
                        offset += file->f_pos;
                        break;
                case SEEK_SET:
                        break;
                default:
                        goto out_unlock;
                }
                if (offset < 0)
                        goto out_unlock;

                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        if (od->cache)
                                ovl_seek_cursor(od, offset);
                }
                res = offset;
        }
out_unlock:
        inode_unlock(file_inode(file));

        return res;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
        struct file *realfile = od->realfile;

        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
        if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);

                realfile = lockless_dereference(od->upperfile);
                if (!realfile) {
                        struct path upperpath;

                        ovl_path_upper(dentry, &upperpath);
                        realfile = ovl_path_open(&upperpath, O_RDONLY);
                        smp_mb__before_spinlock();
                        inode_lock(inode);
                        if (!od->upperfile) {
                                if (IS_ERR(realfile)) {
                                        inode_unlock(inode);
                                        return PTR_ERR(realfile);
                                }
                                od->upperfile = realfile;
                        } else {
                                /* somebody has beaten us to it */
                                if (!IS_ERR(realfile))
                                        fput(realfile);
                                realfile = od->upperfile;
                        }
                        inode_unlock(inode);
                }
        }

        return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
        struct ovl_dir_file *od = file->private_data;

        if (od->cache) {
                inode_lock(inode);
                ovl_cache_put(od, file->f_path.dentry);
                inode_unlock(inode);
        }
        fput(od->realfile);
        if (od->upperfile)
                fput(od->upperfile);
        kfree(od);

        return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
        struct path realpath;
        struct file *realfile;
        struct ovl_dir_file *od;
        enum ovl_path_type type;

        od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        type = ovl_path_real(file->f_path.dentry, &realpath);
        realfile = ovl_path_open(&realpath, file->f_flags);
        if (IS_ERR(realfile)) {
                kfree(od);
                return PTR_ERR(realfile);
        }
        od->realfile = realfile;
        od->is_real = !OVL_TYPE_MERGE(type);
        od->is_upper = OVL_TYPE_UPPER(type);
        file->private_data = od;

        return 0;
}

const struct file_operations ovl_dir_operations = {
        .read = generic_read_dir,
        .open = ovl_dir_open,
        .iterate = ovl_iterate,
        .llseek = ovl_dir_llseek,
        .fsync = ovl_dir_fsync,
        .release = ovl_dir_release,
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
        int err;
        struct ovl_cache_entry *p;

        err = ovl_dir_read_merged(dentry, list);
        if (err)
                return err;

        err = 0;

        list_for_each_entry(p, list, l_node) {
                if (p->is_whiteout)
                        continue;

                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                err = -ENOTEMPTY;
                break;
        }

        return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
        struct ovl_cache_entry *p;

        inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
        list_for_each_entry(p, list, l_node) {
                struct dentry *dentry;

                if (!p->is_whiteout)
                        continue;

                dentry = lookup_one_len(p->name, upper, p->len);
                if (IS_ERR(dentry)) {
                        pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
                               upper->d_name.name, p->len, p->name,
                               (int) PTR_ERR(dentry));
                        continue;
                }
                if (dentry->d_inode)
                        ovl_cleanup(upper->d_inode, dentry);
                dput(dentry);
        }
        inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
                            int namelen, loff_t offset, u64 ino,
                            unsigned int d_type)
{
        struct ovl_readdir_data *rdd =
                container_of(ctx, struct ovl_readdir_data, ctx);

        /* Even if d_type is not supported, DT_DIR is returned for . and .. */
        if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
                return 0;

        if (d_type != DT_UNKNOWN)
                rdd->d_type_supported = true;

        return 0;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown.
 * Returns a negative value if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
        int err;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_check_d_type,
                .d_type_supported = false,
        };

        err = ovl_dir_read(realpath, &rdd);
        if (err)
                return err;

        return rdd.d_type_supported;
}

static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
        int err;
        struct inode *dir = path->dentry->d_inode;
        LIST_HEAD(list);
        struct ovl_cache_entry *p;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .dentry = NULL,
                .list = &list,
                .root = RB_ROOT,
                .is_lowest = false,
        };

        err = ovl_dir_read(path, &rdd);
        if (err)
                goto out;

        inode_lock_nested(dir, I_MUTEX_PARENT);
        list_for_each_entry(p, &list, l_node) {
                struct dentry *dentry;

                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                dentry = lookup_one_len(p->name, path->dentry, p->len);
                if (IS_ERR(dentry))
                        continue;
                if (dentry->d_inode)
                        ovl_workdir_cleanup(dir, path->mnt, dentry, level);
                dput(dentry);
        }
        inode_unlock(dir);
out:
        ovl_cache_free(&list);
}

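/*
 * Delete a leftover entry: non-directories and anything more than one
 * level deep are removed directly; otherwise try rmdir first and, if
 * the directory turns out to be non-empty, clear it out recursively
 * before removing it.
 */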
void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
                         struct dentry *dentry, int level)
{
        int err;

        if (!d_is_dir(dentry) || level > 1) {
                ovl_cleanup(dir, dentry);
                return;
        }

        err = ovl_do_rmdir(dir, dentry);
        if (err) {
                struct path path = { .mnt = mnt, .dentry = dentry };

                inode_unlock(dir);
                ovl_workdir_cleanup_recurse(&path, level + 1);
                inode_lock_nested(dir, I_MUTEX_PARENT);
                ovl_cleanup(dir, dentry);
        }
}

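/*
 * Verify each entry in the index directory against the lower layer
 * stack and remove entries that fail verification.  A read-only
 * filesystem (-EROFS) aborts the cleanup.
 */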
int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                         struct path *lowerstack, unsigned int numlower)
{
        int err;
        struct inode *dir = dentry->d_inode;
        struct path path = { .mnt = mnt, .dentry = dentry };
        LIST_HEAD(list);
        struct ovl_cache_entry *p;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .dentry = NULL,
                .list = &list,
                .root = RB_ROOT,
                .is_lowest = false,
        };

        err = ovl_dir_read(&path, &rdd);
        if (err)
                goto out;

        inode_lock_nested(dir, I_MUTEX_PARENT);
        list_for_each_entry(p, &list, l_node) {
                struct dentry *index;

                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                index = lookup_one_len(p->name, dentry, p->len);
                if (IS_ERR(index)) {
                        err = PTR_ERR(index);
                        break;
                }
                err = ovl_verify_index(index, lowerstack, numlower);
                if (err) {
                        if (err == -EROFS)
                                break;
                        err = ovl_cleanup(dir, index);
                        if (err)
                                break;
                }
                dput(index);
        }
        inode_unlock(dir);
out:
        ovl_cache_free(&list);
        if (err)
                pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
        return err;
}