rhashtable: simplify nested_table_alloc() and rht_bucket_nested_insert()
author     NeilBrown <neilb@suse.com>
           Mon, 18 Jun 2018 02:52:50 +0000 (12:52 +1000)
committer  David S. Miller <davem@davemloft.net>
           Fri, 22 Jun 2018 04:43:27 +0000 (13:43 +0900)
Now that we don't use the hash value or shift in nested_table_alloc(),
there is room for simplification.
We only need to pass an "is this a leaf" flag to nested_table_alloc(),
and no longer need to track the hash and shift state in
rht_bucket_nested_insert().
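
As a rough standalone illustration (not kernel code; it assumes a
hypothetical geometry of 4096-byte pages holding 8-byte pointers, so
shift == 9, with tbl->nest == 9 and a table of 2^27 buckets), the only
per-level decision left after this change is "size <= (1 << shift)":

  #include <stdio.h>

  int main(void)
  {
          const unsigned int shift = 9;        /* pointers per page: 512 */
          unsigned int size = (1U << 27) >> 9; /* tbl->size >> tbl->nest */
          int level = 1;

          /* Mirrors the loop in rht_bucket_nested_insert(): the flag
           * passed to nested_table_alloc() is true only for the final
           * (leaf) level, whose page holds buckets needing nulls heads. */
          printf("level %d: leaf=%d\n", level, size <= (1U << shift));
          while (size > (1U << shift)) {
                  size >>= shift;
                  level++;
                  printf("level %d: leaf=%d\n", level,
                         size <= (1U << shift));
          }
          return 0;
  }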

Note there is another minor cleanup in nested_table_alloc() here.
The number of elements in a page of "union nested_table" entries is
most naturally

  PAGE_SIZE / sizeof(ntbl[0])

The previous code had

  PAGE_SIZE / sizeof(ntbl[0].bucket)

which happens to be the correct value only because the bucket uses all
the space in the union.
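
For reference, the union in question is (at the time of this patch)
defined in lib/rhashtable.c as two overlapping pointers:

  union nested_table {
          union nested_table __rcu *table;
          struct rhash_head __rcu *bucket;
  };

Both members are single pointers of the same size, which is the only
reason the old expression gave the right answer; the new one stays
correct even if a larger member is ever added to the union.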

Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
lib/rhashtable.c

index a81cd27d518c7a1748a55cd9d704ccf4956e7c35..2aa41c15df17e9dd5b9e98304bb6f16dd8e328c7 100644
@@ -116,7 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 
 static union nested_table *nested_table_alloc(struct rhashtable *ht,
                                              union nested_table __rcu **prev,
-                                             unsigned int shifted)
+                                             bool leaf)
 {
        union nested_table *ntbl;
        int i;
@@ -127,8 +127,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 
        ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 
-       if (ntbl && shifted) {
-               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+       if (ntbl && leaf) {
+               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
                        INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
        }
 
@@ -155,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
                return NULL;
 
        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-                               0)) {
+                               false)) {
                kfree(tbl);
                return NULL;
        }
@@ -1207,24 +1207,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        union nested_table *ntbl;
-       unsigned int shifted;
-       unsigned int nhash;
 
        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        hash >>= tbl->nest;
-       nhash = index;
-       shifted = tbl->nest;
        ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                 size <= (1 << shift) ? shifted : 0);
+                                 size <= (1 << shift));
 
        while (ntbl && size > (1 << shift)) {
                index = hash & ((1 << shift) - 1);
                size >>= shift;
                hash >>= shift;
-               nhash |= index << shifted;
-               shifted += shift;
                ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                         size <= (1 << shift) ? shifted : 0);
+                                         size <= (1 << shift));
        }
 
        if (!ntbl)