In preparation for moving lowerstack into ovl_inode.
Note that in ovl_lookup() the temporary stack's dentry refs are now cloned
into the final ovl_lowerstack instead of being transferred, so the cleanup
path always needs to call ovl_stack_free(stack).
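
For illustration, the resulting pattern in ovl_lookup() is roughly as
follows (simplified; ovl_stack_cpy() takes a dget() reference on each
copied dentry, see the helper added below):

	/* clone the temp stack's refs into the final entry */
	ovl_stack_cpy(ovl_lowerstack(oe), stack, ctr);
	...
	/* the temp stack still owns its refs and must drop them */
	ovl_stack_free(stack, ctr);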
Reviewed-by: Alexander Larsson <alexl@redhat.com>
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
if (!d.stop && ovl_numlower(poe)) {
err = -ENOMEM;
- stack = kcalloc(ofs->numlayer - 1, sizeof(struct ovl_path),
- GFP_KERNEL);
+ stack = ovl_stack_alloc(ofs->numlayer - 1);
if (!stack)
goto out_put_upper;
}
if (!oe)
goto out_put;
- memcpy(ovl_lowerstack(oe), stack, sizeof(struct ovl_path) * ctr);
+ ovl_stack_cpy(ovl_lowerstack(oe), stack, ctr);
dentry->d_fsdata = oe;
if (upperopaque)
kfree(origin_path);
}
dput(index);
- kfree(stack);
+ ovl_stack_free(stack, ctr);
kfree(d.redirect);
return d_splice_alias(inode, dentry);
out_free_oe:
dentry->d_fsdata = NULL;
- kfree(oe);
+ ovl_free_entry(oe);
out_put:
dput(index);
- for (i = 0; i < ctr; i++)
- dput(stack[i].dentry);
- kfree(stack);
+ ovl_stack_free(stack, ctr);
out_put_upper:
if (origin_path) {
dput(origin_path->dentry);
struct dentry *ovl_indexdir(struct super_block *sb);
bool ovl_index_all(struct super_block *sb);
bool ovl_verify_lower(struct super_block *sb);
+struct ovl_path *ovl_stack_alloc(unsigned int n);
+void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n);
+void ovl_stack_put(struct ovl_path *stack, unsigned int n);
+void ovl_stack_free(struct ovl_path *stack, unsigned int n);
struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
+void ovl_free_entry(struct ovl_entry *oe);
bool ovl_dentry_remote(struct dentry *dentry);
void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry);
void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry);
return ovl_numlower(oe) ? oe->__lowerstack : NULL;
}
-struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
-
static inline struct ovl_entry *OVL_E(struct dentry *dentry)
{
return (struct ovl_entry *) dentry->d_fsdata;
MODULE_PARM_DESC(xino_auto,
"Auto enable xino feature");
-static void ovl_entry_stack_free(struct ovl_entry *oe)
-{
- struct ovl_path *lowerstack = ovl_lowerstack(oe);
- unsigned int i;
-
- for (i = 0; i < ovl_numlower(oe); i++)
- dput(lowerstack[i].dentry);
-}
-
static bool ovl_metacopy_def = IS_ENABLED(CONFIG_OVERLAY_FS_METACOPY);
module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
MODULE_PARM_DESC(metacopy,
struct ovl_entry *oe = dentry->d_fsdata;
if (oe) {
- ovl_entry_stack_free(oe);
+ ovl_stack_put(ovl_lowerstack(oe), ovl_numlower(oe));
kfree_rcu(oe, rcu);
}
}
return 0;
out_free_oe:
- ovl_entry_stack_free(oe);
- kfree(oe);
+ ovl_free_entry(oe);
out_err:
kfree(splitlower);
path_put(&upperpath);
return ofs->config.nfs_export && ofs->config.index;
}
+struct ovl_path *ovl_stack_alloc(unsigned int n)
+{
+ return kcalloc(n, sizeof(struct ovl_path), GFP_KERNEL);
+}
+
+void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n)
+{
+ unsigned int i;
+
+ memcpy(dst, src, sizeof(struct ovl_path) * n);
+ for (i = 0; i < n; i++)
+ dget(src[i].dentry);
+}
+
+void ovl_stack_put(struct ovl_path *stack, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; stack && i < n; i++)
+ dput(stack[i].dentry);
+}
+
+void ovl_stack_free(struct ovl_path *stack, unsigned int n)
+{
+ ovl_stack_put(stack, n);
+ kfree(stack);
+}
+
struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
{
size_t size = offsetof(struct ovl_entry, __lowerstack[numlower]);
return oe;
}
+void ovl_free_entry(struct ovl_entry *oe)
+{
+ ovl_stack_put(ovl_lowerstack(oe), ovl_numlower(oe));
+ kfree(oe);
+}
+
#define OVL_D_REVALIDATE (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE)
bool ovl_dentry_remote(struct dentry *dentry)