radix tree test suite: fix allocation calculation in kmem_cache_alloc_bulk()
[linux-block.git] tools/testing/radix-tree/linux.c
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int test_verbose;

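/*
 * Userspace stand-in for the kernel's struct kmem_cache: the radix tree
 * test suite runs kernel code in userspace, so slab caches are emulated
 * on top of malloc()/free() with a simple per-cache free list.
 */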
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;		/* object size in bytes */
	unsigned int align;		/* object alignment, 0 for none */
	int nr_objs;			/* objects held on the free list */
	void *objs;			/* free list, linked via node->parent */
	void (*ctor)(void *);
	unsigned int non_kernel;	/* budget for !__GFP_DIRECT_RECLAIM allocations */
	unsigned long nr_allocated;	/* objects currently allocated */
	unsigned long nr_tallocated;	/* total allocations since last reset */
};

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

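/*
 * Allocate a single object.  Objects are reused from the cache's free
 * list when available; otherwise the allocation falls back to
 * malloc()/posix_memalign().  Without __GFP_DIRECT_RECLAIM the
 * allocation succeeds only while the non_kernel budget lasts, which
 * lets tests exercise failure paths.  The lru argument is accepted for
 * API compatibility and ignored here.
 */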
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

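/*
 * Free one object with cachep->lock held.  Unaligned caches keep freed
 * objects on the free list for reuse until it already holds more than
 * 10 entries; aligned caches, and overflow objects, are poisoned and
 * returned to the system allocator instead.
 */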
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

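/*
 * Bulk-allocate @size objects into @p.  The whole request is served
 * either from the free list or from the system allocator.  In the
 * fallback path each array element gets exactly cachep->size bytes;
 * per the commit subject, this per-element size is the allocation
 * calculation being fixed, rather than sizing each element for the
 * entire bulk request.
 */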
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (cachep->non_kernel < size)
			return 0;

		cachep->non_kernel -= size;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size);
			} else {
				p[i] = malloc(cachep->size);
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

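/*
 * Mirrors the kernel kmem_cache_create() signature; the name and flags
 * arguments are accepted for API compatibility but not stored.
 */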
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and their bulk
 * counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators with an unaligned kmem_cache to force
	 * bulk alloc/free to reuse objects from the cache's free list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches free immediately rather than caching objects */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
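
For reference, a minimal driver for the self-test above might look like the sketch below. This main() is hypothetical and not part of linux.c; the real test suite has its own harness, which also defines kmalloc_verbose.

/* Hypothetical standalone driver -- not part of linux.c. */
#include <stdio.h>

void test_kmem_cache_bulk(void);	/* defined in linux.c above */

int main(void)
{
	test_kmem_cache_bulk();		/* asserts internally on failure */
	printf("kmem_cache bulk self-test passed\n");
	return 0;
}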