ceph: kill ceph_empty_snapc
author    Ilya Dryomov <idryomov@gmail.com>  Tue, 16 Feb 2016 14:00:24 +0000 (15:00 +0100)
committer Ilya Dryomov <idryomov@gmail.com>  Fri, 25 Mar 2016 17:51:52 +0000 (18:51 +0100)
ceph_empty_snapc->num_snaps == 0 at all times.  Passing such a snapc to
ceph_osdc_alloc_request() (possibly through ceph_osdc_new_request()) is
equivalent to passing NULL, as ceph_osdc_alloc_request() uses it only
for sizing the request message.
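
For context, ceph_snap_context carries a trailing array of snap ids sized
by num_snaps, and the allocator only uses that count to reserve space in
the request message.  A paraphrased sketch (struct as declared in
include/linux/ceph/libceph.h in this era; the sizing line approximates
ceph_osdc_alloc_request() in net/ceph/osd_client.c, not verbatim):

    struct ceph_snap_context {
            atomic_t nref;
            u64 seq;
            u32 num_snaps;
            u64 snaps[];            /* trailing array of snap ids */
    };

    /* in ceph_osdc_alloc_request(): reserve room for the snap list */
    msg_size += 8 * (snapc ? snapc->num_snaps : 0);     /* snaps */

With num_snaps == 0, the reserved size is identical whether snapc is
ceph_empty_snapc or NULL.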

Further, in all four cases the subsequent ceph_osdc_build_request() is
passed NULL for snapc, meaning that 0 is encoded for seq and num_snaps
and making ceph_empty_snapc entirely useless.  The two cases where it
actually mattered were removed in commits 860560904962 ("ceph: avoid
sending unnecessary FLUSHSNAP message") and 23078637e054 ("ceph: fix
queuing inode to mdsdir's snaprealm").

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Yan, Zheng <zyan@redhat.com>
fs/ceph/addr.c
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c9f3050899812068c69c2232ef3e7369a45ab758..888674c311c511ebb6d5f410e4e26e8ca238cab0 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1609,7 +1609,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                    ceph_vino(inode), 0, &len, 0, 1,
                                    CEPH_OSD_OP_CREATE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-                                   ceph_empty_snapc, 0, 0, false);
+                                   NULL, 0, 0, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
@@ -1627,9 +1627,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                    ceph_vino(inode), 0, &len, 1, 3,
                                    CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-                                   ceph_empty_snapc,
-                                   ci->i_truncate_seq, ci->i_truncate_size,
-                                   false);
+                                   NULL, ci->i_truncate_seq,
+                                   ci->i_truncate_size, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
@@ -1750,8 +1749,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
                goto out;
        }
 
-       rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-                                        ceph_empty_snapc,
+       rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
                                         1, false, GFP_NOFS);
        if (!rd_req) {
                err = -ENOMEM;
@@ -1765,8 +1763,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
                 "%llx.00000000", ci->i_vino.ino);
        rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
 
-       wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-                                        ceph_empty_snapc,
+       wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
                                         1, false, GFP_NOFS);
        if (!wr_req) {
                err = -ENOMEM;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4aa7122a8d38c18dd4fe7443fb64f46765b83280..9caaa7ffc93fe06876283367d78c3573512c7549 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -296,8 +296,6 @@ static int cmpu64_rev(const void *a, const void *b)
 }
 
 
-struct ceph_snap_context *ceph_empty_snapc;
-
 /*
  * build the snap context for a given realm.
  */
@@ -987,17 +985,3 @@ out:
                up_write(&mdsc->snap_rwsem);
        return;
 }
-
-int __init ceph_snap_init(void)
-{
-       ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
-       if (!ceph_empty_snapc)
-               return -ENOMEM;
-       ceph_empty_snapc->seq = 1;
-       return 0;
-}
-
-void ceph_snap_exit(void)
-{
-       ceph_put_snap_context(ceph_empty_snapc);
-}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index e82acc6f3ac344a1a158216b5d1164192626b6f0..715282a92a0782dc913961470815cc360de64e6f 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1042,19 +1042,14 @@ static int __init init_ceph(void)
 
        ceph_flock_init();
        ceph_xattr_init();
-       ret = ceph_snap_init();
-       if (ret)
-               goto out_xattr;
        ret = register_filesystem(&ceph_fs_type);
        if (ret)
-               goto out_snap;
+               goto out_xattr;
 
        pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
 
        return 0;
 
-out_snap:
-       ceph_snap_exit();
 out_xattr:
        ceph_xattr_exit();
        destroy_caches();
@@ -1066,7 +1061,6 @@ static void __exit exit_ceph(void)
 {
        dout("exit_ceph\n");
        unregister_filesystem(&ceph_fs_type);
-       ceph_snap_exit();
        ceph_xattr_exit();
        destroy_caches();
 }
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d4425b172bb669f251313654e75d9c7fc37d2208..57ac43d64322cc04e9222502d7553ac2d4e316a6 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -720,7 +720,6 @@ static inline int default_congestion_kb(void)
 
 
 /* snap.c */
-extern struct ceph_snap_context *ceph_empty_snapc;
 struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
                                               u64 ino);
 extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
@@ -737,8 +736,6 @@ extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
 extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                                  struct ceph_cap_snap *capsnap);
 extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
-extern int ceph_snap_init(void);
-extern void ceph_snap_exit(void);
 
 /*
  * a cap_snap is "pending" if it is still awaiting an in-progress