Commit | Line | Data |
---|---|---|
2238959b MB |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // | |
3 | // regmap KUnit tests | |
4 | // | |
5 | // Copyright 2023 Arm Ltd | |
6 | ||
7b7982f1 | 7 | #include <kunit/device.h> |
2238959b MB |
8 | #include <kunit/test.h> |
9 | #include "internal.h" | |
10 | ||
11 | #define BLOCK_TEST_SIZE 12 | |
12 | ||
7b7982f1 RF |
/* Per-suite state: the KUnit-managed device the test regmaps attach to. */
struct regmap_test_priv {
	struct device *dev;	/* created in suite init, owned by KUnit */
};
16 | ||
2f0dbb24 MB |
/*
 * Fill @new with @size random bytes, guaranteeing that every byte
 * differs from the corresponding byte of @orig.  Used to generate
 * register values that are certain to read back as changes.
 *
 * Use size_t for the index so the comparison against @size is not a
 * signed/unsigned mismatch (and stays correct for very large sizes).
 */
static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	size_t i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * super care.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}
33 | ||
2238959b MB |
/*
 * Base config shared by all tests: registers 0..BLOCK_TEST_SIZE at
 * stride 1, with register values the same width as unsigned int so
 * the RAM backend's value buffer can be indexed directly.
 */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
39 | ||
/* Pairs a regcache implementation with a human readable test label. */
struct regcache_types {
	enum regcache_type type;	/* cache backend under test */
	const char *name;		/* label shown in the KUnit case description */
};
44 | ||
/*
 * Generate the KUnit parameter description for a cache-type case.
 * NOTE(review): strcpy() is unbounded; assumes every name in the
 * tables below fits the fixed-size desc buffer KUnit supplies —
 * confirm against KUNIT_PARAM_DESC_SIZE.
 */
static void case_to_desc(const struct regcache_types *t, char *desc)
{
	strcpy(desc, t->name);
}
49 | ||
/* Every cache type, including running with no cache at all. */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);

/* Only the types that actually store values (REGCACHE_NONE excluded). */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);

/* Caches that can represent sparse register maps (flat excluded). */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
73 | ||
7b7982f1 RF |
74 | static struct regmap *gen_regmap(struct kunit *test, |
75 | struct regmap_config *config, | |
2238959b MB |
76 | struct regmap_ram_data **data) |
77 | { | |
7b7982f1 | 78 | struct regmap_test_priv *priv = test->priv; |
2238959b MB |
79 | unsigned int *buf; |
80 | struct regmap *ret; | |
81 | size_t size = (config->max_register + 1) * sizeof(unsigned int); | |
82 | int i; | |
83 | struct reg_default *defaults; | |
84 | ||
a9e26169 GR |
85 | config->disable_locking = config->cache_type == REGCACHE_RBTREE || |
86 | config->cache_type == REGCACHE_MAPLE; | |
87 | ||
2238959b MB |
88 | buf = kmalloc(size, GFP_KERNEL); |
89 | if (!buf) | |
90 | return ERR_PTR(-ENOMEM); | |
91 | ||
92 | get_random_bytes(buf, size); | |
93 | ||
94 | *data = kzalloc(sizeof(**data), GFP_KERNEL); | |
95 | if (!(*data)) | |
96 | return ERR_PTR(-ENOMEM); | |
97 | (*data)->vals = buf; | |
98 | ||
99 | if (config->num_reg_defaults) { | |
100 | defaults = kcalloc(config->num_reg_defaults, | |
101 | sizeof(struct reg_default), | |
102 | GFP_KERNEL); | |
103 | if (!defaults) | |
104 | return ERR_PTR(-ENOMEM); | |
105 | config->reg_defaults = defaults; | |
106 | ||
107 | for (i = 0; i < config->num_reg_defaults; i++) { | |
108 | defaults[i].reg = i * config->reg_stride; | |
109 | defaults[i].def = buf[i * config->reg_stride]; | |
110 | } | |
111 | } | |
112 | ||
7b7982f1 | 113 | ret = regmap_init_ram(priv->dev, config, *data); |
2238959b MB |
114 | if (IS_ERR(ret)) { |
115 | kfree(buf); | |
116 | kfree(*data); | |
117 | } | |
118 | ||
119 | return ret; | |
120 | } | |
121 | ||
18003306 MB |
/* Register-access predicate: every register except 5 is permitted. */
static bool reg_5_false(struct device *context, unsigned int reg)
{
	if (reg == 5)
		return false;

	return true;
}
126 | ||
2238959b MB |
/* Write a single register, read it back, for every cache type. */
static void basic_read_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);

	regmap_exit(map);
}
155 | ||
/* Bulk-write a block then verify each register via single reads. */
static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
192 | ||
/* Single-write a block then verify it via one bulk read. */
static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
225 | ||
18003306 MB |
/*
 * Writes to a read-only register (5, via reg_5_false) must fail and
 * must never reach the device; all other registers write normally.
 */
static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Clear the hardware write-tracking before the attempts */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
260 | ||
a07bff40 MB |
/*
 * Reads of a write-only register (5, via reg_5_false) must fail and
 * never hit the hardware.  The flat cache is the exception: it does
 * not track readability, so the read succeeds from cache there.
 */
static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}
300 | ||
2238959b MB |
301 | static void reg_defaults(struct kunit *test) |
302 | { | |
303 | struct regcache_types *t = (struct regcache_types *)test->param_value; | |
304 | struct regmap *map; | |
305 | struct regmap_config config; | |
306 | struct regmap_ram_data *data; | |
307 | unsigned int rval[BLOCK_TEST_SIZE]; | |
308 | int i; | |
309 | ||
310 | config = test_regmap_config; | |
311 | config.cache_type = t->type; | |
312 | config.num_reg_defaults = BLOCK_TEST_SIZE; | |
313 | ||
7b7982f1 | 314 | map = gen_regmap(test, &config, &data); |
2238959b MB |
315 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
316 | if (IS_ERR(map)) | |
317 | return; | |
318 | ||
319 | /* Read back the expected default data */ | |
320 | KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, | |
321 | BLOCK_TEST_SIZE)); | |
322 | KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); | |
323 | ||
324 | /* The data should have been read from cache if there was one */ | |
325 | for (i = 0; i < BLOCK_TEST_SIZE; i++) | |
326 | KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); | |
327 | } | |
328 | ||
329 | static void reg_defaults_read_dev(struct kunit *test) | |
330 | { | |
331 | struct regcache_types *t = (struct regcache_types *)test->param_value; | |
332 | struct regmap *map; | |
333 | struct regmap_config config; | |
334 | struct regmap_ram_data *data; | |
335 | unsigned int rval[BLOCK_TEST_SIZE]; | |
336 | int i; | |
337 | ||
338 | config = test_regmap_config; | |
339 | config.cache_type = t->type; | |
340 | config.num_reg_defaults_raw = BLOCK_TEST_SIZE; | |
341 | ||
7b7982f1 | 342 | map = gen_regmap(test, &config, &data); |
2238959b MB |
343 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
344 | if (IS_ERR(map)) | |
345 | return; | |
346 | ||
347 | /* We should have read the cache defaults back from the map */ | |
348 | for (i = 0; i < BLOCK_TEST_SIZE; i++) { | |
349 | KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]); | |
350 | data->read[i] = false; | |
351 | } | |
352 | ||
353 | /* Read back the expected default data */ | |
354 | KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, | |
355 | BLOCK_TEST_SIZE)); | |
356 | KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); | |
357 | ||
358 | /* The data should have been read from cache if there was one */ | |
359 | for (i = 0; i < BLOCK_TEST_SIZE; i++) | |
360 | KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); | |
361 | } | |
362 | ||
/*
 * regmap_register_patch() should write exactly the patched registers
 * to the device and leave every other register untouched.
 */
static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
414 | ||
/*
 * With reg_stride = 2 only even registers exist: odd-register accesses
 * must fail without touching the device, even ones work normally.
 */
static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}
457 | ||
/*
 * Paged-access range used by the range tests: virtual registers
 * 20..40 are accessed through a window at registers 4..13, with the
 * page selected via register 1.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
468 | ||
6a2e332c | 469 | static bool test_range_window_volatile(struct device *dev, unsigned int reg) |
2238959b MB |
470 | { |
471 | if (reg >= test_range.window_start && | |
fabe32cc | 472 | reg <= test_range.window_start + test_range.window_len) |
2238959b MB |
473 | return true; |
474 | ||
6a2e332c MB |
475 | return false; |
476 | } | |
477 | ||
478 | static bool test_range_all_volatile(struct device *dev, unsigned int reg) | |
479 | { | |
480 | if (test_range_window_volatile(dev, reg)) | |
481 | return true; | |
482 | ||
2238959b MB |
483 | if (reg >= test_range.range_min && reg <= test_range.range_max) |
484 | return true; | |
485 | ||
486 | return false; | |
487 | } | |
488 | ||
/*
 * Accesses to the virtual range must be translated into a selector
 * write plus a window access, and must never hit the virtual register
 * numbers on the device directly.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same target register on the second page */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
559 | ||
560 | /* Try to stress dynamic creation of cache data structures */ | |
561 | static void stress_insert(struct kunit *test) | |
562 | { | |
563 | struct regcache_types *t = (struct regcache_types *)test->param_value; | |
564 | struct regmap *map; | |
565 | struct regmap_config config; | |
566 | struct regmap_ram_data *data; | |
567 | unsigned int rval, *vals; | |
568 | size_t buf_sz; | |
569 | int i; | |
570 | ||
571 | config = test_regmap_config; | |
572 | config.cache_type = t->type; | |
573 | config.max_register = 300; | |
574 | ||
7b7982f1 | 575 | map = gen_regmap(test, &config, &data); |
2238959b MB |
576 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
577 | if (IS_ERR(map)) | |
578 | return; | |
579 | ||
580 | vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register, | |
581 | GFP_KERNEL); | |
582 | KUNIT_ASSERT_FALSE(test, vals == NULL); | |
583 | buf_sz = sizeof(unsigned long) * config.max_register; | |
584 | ||
585 | get_random_bytes(vals, buf_sz); | |
586 | ||
587 | /* Write data into the map/cache in ever decreasing strides */ | |
588 | for (i = 0; i < config.max_register; i += 100) | |
589 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
590 | for (i = 0; i < config.max_register; i += 50) | |
591 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
592 | for (i = 0; i < config.max_register; i += 25) | |
593 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
594 | for (i = 0; i < config.max_register; i += 10) | |
595 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
596 | for (i = 0; i < config.max_register; i += 5) | |
597 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
598 | for (i = 0; i < config.max_register; i += 3) | |
599 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
600 | for (i = 0; i < config.max_register; i += 2) | |
601 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
602 | for (i = 0; i < config.max_register; i++) | |
603 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); | |
604 | ||
605 | /* Do reads from the cache (if there is one) match? */ | |
606 | for (i = 0; i < config.max_register; i ++) { | |
607 | KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); | |
608 | KUNIT_EXPECT_EQ(test, rval, vals[i]); | |
609 | KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); | |
610 | } | |
611 | ||
612 | regmap_exit(map); | |
613 | } | |
614 | ||
/*
 * Writes made while bypass is enabled must reach the device but not
 * the cache; disabling bypass should restore cached reads of the
 * original value.
 */
static void cache_bypass(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[0], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}
652 | ||
/*
 * After the device contents are trashed, regcache_sync() must rewrite
 * every dirty register with the cached values.
 */
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}
690 | ||
/*
 * With defaults present, regcache_sync() should only write back the
 * registers that were actually changed from their defaults.
 */
static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}
726 | ||
357a1ebd MB |
/*
 * Values cached while in cache-only mode must be written out by
 * regcache_sync(), except for the read-only register (5) which must
 * be skipped.
 */
static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
767 | ||
2238959b MB |
/*
 * A registered patch is applied to the device on regcache_sync() but
 * must not leak into the cache: cached reads still return the
 * pre-patch values.
 */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
828 | ||
/*
 * regcache_drop_region() should evict exactly the dropped registers:
 * a subsequent read hits the device only for those registers.
 */
static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}
870 | ||
d881ee5a MB |
/*
 * regcache_reg_cached() should report false before any access (no
 * defaults configured) and true for every register once reads have
 * populated the cache.
 */
static void cache_present(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));

	regmap_exit(map);
}
909 | ||
6a2e332c MB |
910 | /* Check that caching the window register works with sync */ |
911 | static void cache_range_window_reg(struct kunit *test) | |
912 | { | |
913 | struct regcache_types *t = (struct regcache_types *)test->param_value; | |
914 | struct regmap *map; | |
915 | struct regmap_config config; | |
916 | struct regmap_ram_data *data; | |
917 | unsigned int val; | |
918 | int i; | |
919 | ||
920 | config = test_regmap_config; | |
921 | config.cache_type = t->type; | |
922 | config.volatile_reg = test_range_window_volatile; | |
923 | config.ranges = &test_range; | |
924 | config.num_ranges = 1; | |
925 | config.max_register = test_range.range_max; | |
926 | ||
7b7982f1 | 927 | map = gen_regmap(test, &config, &data); |
6a2e332c MB |
928 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
929 | if (IS_ERR(map)) | |
930 | return; | |
931 | ||
932 | /* Write new values to the entire range */ | |
933 | for (i = test_range.range_min; i <= test_range.range_max; i++) | |
934 | KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0)); | |
935 | ||
936 | val = data->vals[test_range.selector_reg] & test_range.selector_mask; | |
937 | KUNIT_ASSERT_EQ(test, val, 2); | |
938 | ||
939 | /* Write to the first register in the range to reset the page */ | |
940 | KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); | |
941 | val = data->vals[test_range.selector_reg] & test_range.selector_mask; | |
942 | KUNIT_ASSERT_EQ(test, val, 0); | |
943 | ||
944 | /* Trigger a cache sync */ | |
945 | regcache_mark_dirty(map); | |
946 | KUNIT_ASSERT_EQ(test, 0, regcache_sync(map)); | |
947 | ||
948 | /* Write to the first register again, the page should be reset */ | |
949 | KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); | |
950 | val = data->vals[test_range.selector_reg] & test_range.selector_mask; | |
951 | KUNIT_ASSERT_EQ(test, val, 0); | |
952 | ||
953 | /* Trigger another cache sync */ | |
954 | regcache_mark_dirty(map); | |
955 | KUNIT_ASSERT_EQ(test, 0, regcache_sync(map)); | |
956 | ||
957 | /* Write to the last register again, the page should be reset */ | |
958 | KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0)); | |
959 | val = data->vals[test_range.selector_reg] & test_range.selector_mask; | |
960 | KUNIT_ASSERT_EQ(test, val, 2); | |
961 | } | |
962 | ||
155a6bd6 MB |
/*
 * Parameter tuple for the raw (byte-oriented) regmap tests: each case
 * pairs a cache implementation with a value endianness.
 */
struct raw_test_types {
	const char *name;	/* case label shown in KUnit output */

	enum regcache_type cache_type;
	enum regmap_endian val_endian;
};
969 | ||
970 | static void raw_to_desc(const struct raw_test_types *t, char *desc) | |
971 | { | |
972 | strcpy(desc, t->name); | |
973 | } | |
974 | ||
/* All cache type / endianness combinations, including uncached */
static const struct raw_test_types raw_types_list[] = {
	{ "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
	{ "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
	{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
	{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
	{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);

/* As above but restricted to real caches, for tests that exercise sync */
static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
	{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
	{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
998 | ||
/*
 * Template config for the raw tests: 16-bit registers and values with
 * little-endian register formatting.  cache_type and val_format_endian
 * are filled in per test case by gen_raw_regmap().
 */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
1006 | ||
7b7982f1 RF |
1007 | static struct regmap *gen_raw_regmap(struct kunit *test, |
1008 | struct regmap_config *config, | |
155a6bd6 MB |
1009 | struct raw_test_types *test_type, |
1010 | struct regmap_ram_data **data) | |
1011 | { | |
7b7982f1 | 1012 | struct regmap_test_priv *priv = test->priv; |
155a6bd6 MB |
1013 | u16 *buf; |
1014 | struct regmap *ret; | |
1015 | size_t size = (config->max_register + 1) * config->reg_bits / 8; | |
1016 | int i; | |
1017 | struct reg_default *defaults; | |
1018 | ||
1019 | config->cache_type = test_type->cache_type; | |
1020 | config->val_format_endian = test_type->val_endian; | |
a9e26169 GR |
1021 | config->disable_locking = config->cache_type == REGCACHE_RBTREE || |
1022 | config->cache_type == REGCACHE_MAPLE; | |
155a6bd6 MB |
1023 | |
1024 | buf = kmalloc(size, GFP_KERNEL); | |
1025 | if (!buf) | |
1026 | return ERR_PTR(-ENOMEM); | |
1027 | ||
1028 | get_random_bytes(buf, size); | |
1029 | ||
1030 | *data = kzalloc(sizeof(**data), GFP_KERNEL); | |
1031 | if (!(*data)) | |
1032 | return ERR_PTR(-ENOMEM); | |
1033 | (*data)->vals = (void *)buf; | |
1034 | ||
1035 | config->num_reg_defaults = config->max_register + 1; | |
1036 | defaults = kcalloc(config->num_reg_defaults, | |
1037 | sizeof(struct reg_default), | |
1038 | GFP_KERNEL); | |
1039 | if (!defaults) | |
1040 | return ERR_PTR(-ENOMEM); | |
1041 | config->reg_defaults = defaults; | |
1042 | ||
1043 | for (i = 0; i < config->num_reg_defaults; i++) { | |
1044 | defaults[i].reg = i; | |
1045 | switch (test_type->val_endian) { | |
1046 | case REGMAP_ENDIAN_LITTLE: | |
1047 | defaults[i].def = le16_to_cpu(buf[i]); | |
1048 | break; | |
1049 | case REGMAP_ENDIAN_BIG: | |
1050 | defaults[i].def = be16_to_cpu(buf[i]); | |
1051 | break; | |
1052 | default: | |
1053 | return ERR_PTR(-EINVAL); | |
1054 | } | |
1055 | } | |
1056 | ||
1057 | /* | |
1058 | * We use the defaults in the tests but they don't make sense | |
1059 | * to the core if there's no cache. | |
1060 | */ | |
1061 | if (config->cache_type == REGCACHE_NONE) | |
1062 | config->num_reg_defaults = 0; | |
1063 | ||
7b7982f1 | 1064 | ret = regmap_init_raw_ram(priv->dev, config, *data); |
155a6bd6 MB |
1065 | if (IS_ERR(ret)) { |
1066 | kfree(buf); | |
1067 | kfree(*data); | |
1068 | } | |
1069 | ||
1070 | return ret; | |
1071 | } | |
1072 | ||
1073 | static void raw_read_defaults_single(struct kunit *test) | |
1074 | { | |
1075 | struct raw_test_types *t = (struct raw_test_types *)test->param_value; | |
1076 | struct regmap *map; | |
1077 | struct regmap_config config; | |
1078 | struct regmap_ram_data *data; | |
1079 | unsigned int rval; | |
1080 | int i; | |
1081 | ||
1082 | config = raw_regmap_config; | |
1083 | ||
7b7982f1 | 1084 | map = gen_raw_regmap(test, &config, t, &data); |
155a6bd6 MB |
1085 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
1086 | if (IS_ERR(map)) | |
1087 | return; | |
1088 | ||
1089 | /* Check that we can read the defaults via the API */ | |
1090 | for (i = 0; i < config.max_register + 1; i++) { | |
1091 | KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); | |
1092 | KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); | |
1093 | } | |
1094 | ||
1095 | regmap_exit(map); | |
1096 | } | |
1097 | ||
1098 | static void raw_read_defaults(struct kunit *test) | |
1099 | { | |
1100 | struct raw_test_types *t = (struct raw_test_types *)test->param_value; | |
1101 | struct regmap *map; | |
1102 | struct regmap_config config; | |
1103 | struct regmap_ram_data *data; | |
1104 | u16 *rval; | |
1105 | u16 def; | |
1106 | size_t val_len; | |
1107 | int i; | |
1108 | ||
1109 | config = raw_regmap_config; | |
1110 | ||
7b7982f1 | 1111 | map = gen_raw_regmap(test, &config, t, &data); |
155a6bd6 MB |
1112 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
1113 | if (IS_ERR(map)) | |
1114 | return; | |
1115 | ||
1116 | val_len = sizeof(*rval) * (config.max_register + 1); | |
1117 | rval = kmalloc(val_len, GFP_KERNEL); | |
1118 | KUNIT_ASSERT_TRUE(test, rval != NULL); | |
1119 | if (!rval) | |
1120 | return; | |
7b7982f1 | 1121 | |
155a6bd6 MB |
1122 | /* Check that we can read the defaults via the API */ |
1123 | KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len)); | |
1124 | for (i = 0; i < config.max_register + 1; i++) { | |
1125 | def = config.reg_defaults[i].def; | |
1126 | if (config.val_format_endian == REGMAP_ENDIAN_BIG) { | |
866f7021 | 1127 | KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i])); |
155a6bd6 | 1128 | } else { |
866f7021 | 1129 | KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i])); |
155a6bd6 MB |
1130 | } |
1131 | } | |
866f7021 | 1132 | |
155a6bd6 MB |
1133 | kfree(rval); |
1134 | regmap_exit(map); | |
1135 | } | |
1136 | ||
1137 | static void raw_write_read_single(struct kunit *test) | |
1138 | { | |
1139 | struct raw_test_types *t = (struct raw_test_types *)test->param_value; | |
1140 | struct regmap *map; | |
1141 | struct regmap_config config; | |
1142 | struct regmap_ram_data *data; | |
1143 | u16 val; | |
1144 | unsigned int rval; | |
1145 | ||
1146 | config = raw_regmap_config; | |
1147 | ||
7b7982f1 | 1148 | map = gen_raw_regmap(test, &config, t, &data); |
155a6bd6 MB |
1149 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
1150 | if (IS_ERR(map)) | |
1151 | return; | |
1152 | ||
1153 | get_random_bytes(&val, sizeof(val)); | |
1154 | ||
1155 | /* If we write a value to a register we can read it back */ | |
1156 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); | |
1157 | KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); | |
1158 | KUNIT_EXPECT_EQ(test, val, rval); | |
1159 | ||
1160 | regmap_exit(map); | |
1161 | } | |
1162 | ||
/*
 * Raw-write two registers (2 and 3) and verify:
 *  - regmap_read() returns the new values (converted from the raw
 *    endianness to CPU order) for those registers,
 *  - every other register still reads its default,
 *  - the raw bytes landed unchanged in the backing "hardware" RAM.
 */
static void raw_write(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* val[] holds raw bytes in the device endianness, not CPU order */
	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* regmap_read() yields CPU-endian; convert val[] to match */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));

	regmap_exit(map);
}
1214 | ||
d958d978 BW |
1215 | static bool reg_zero(struct device *dev, unsigned int reg) |
1216 | { | |
1217 | return reg == 0; | |
1218 | } | |
1219 | ||
1220 | static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg) | |
1221 | { | |
1222 | return reg == 0; | |
1223 | } | |
1224 | ||
1225 | static void raw_noinc_write(struct kunit *test) | |
1226 | { | |
1227 | struct raw_test_types *t = (struct raw_test_types *)test->param_value; | |
1228 | struct regmap *map; | |
1229 | struct regmap_config config; | |
1230 | struct regmap_ram_data *data; | |
7011b51f BW |
1231 | unsigned int val; |
1232 | u16 val_test, val_last; | |
d958d978 BW |
1233 | u16 val_array[BLOCK_TEST_SIZE]; |
1234 | ||
1235 | config = raw_regmap_config; | |
1236 | config.volatile_reg = reg_zero; | |
1237 | config.writeable_noinc_reg = reg_zero; | |
1238 | config.readable_noinc_reg = reg_zero; | |
1239 | ||
7b7982f1 | 1240 | map = gen_raw_regmap(test, &config, t, &data); |
d958d978 BW |
1241 | KUNIT_ASSERT_FALSE(test, IS_ERR(map)); |
1242 | if (IS_ERR(map)) | |
1243 | return; | |
1244 | ||
1245 | data->noinc_reg = ram_reg_zero; | |
1246 | ||
1247 | get_random_bytes(&val_array, sizeof(val_array)); | |
1248 | ||
1249 | if (config.val_format_endian == REGMAP_ENDIAN_BIG) { | |
1250 | val_test = be16_to_cpu(val_array[1]) + 100; | |
1251 | val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]); | |
1252 | } else { | |
1253 | val_test = le16_to_cpu(val_array[1]) + 100; | |
1254 | val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]); | |
1255 | } | |
1256 | ||
1257 | /* Put some data into the register following the noinc register */ | |
1258 | KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test)); | |
1259 | ||
1260 | /* Write some data to the noinc register */ | |
1261 | KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array, | |
1262 | sizeof(val_array))); | |
1263 | ||
1264 | /* We should read back the last value written */ | |
1265 | KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val)); | |
1266 | KUNIT_ASSERT_EQ(test, val_last, val); | |
1267 | ||
1268 | /* Make sure we didn't touch the register after the noinc register */ | |
1269 | KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val)); | |
1270 | KUNIT_ASSERT_EQ(test, val_test, val); | |
1271 | ||
1272 | regmap_exit(map); | |
1273 | } | |
1274 | ||
155a6bd6 MB |
/*
 * Exercise regcache_sync() on a raw map: write three registers (two
 * via raw write, one via regmap_write) in cache-only mode, check they
 * are visible through the cache but absent from the "hardware", then
 * sync and check the raw bytes reach the hardware buffer.
 */
static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* Values guaranteed to differ from the current hardware contents */
	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Raw-written registers: val[] is device-endian */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			/* Written via regmap_write(), so val[2] is still CPU-endian */
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	/* Clear the write log so only the sync's accesses are recorded */
	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));

	regmap_exit(map);
}
1352 | ||
bb92804b MB |
/*
 * Exercise paged (indirect) register ranges on a raw map: accesses to
 * virtual registers must program the selector register and go through
 * the window, never touching the virtual addresses physically.
 */
static void raw_ranges(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same check one full window further in, forcing a page switch */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
1417 | ||
2238959b MB |
/*
 * Test roster.  The first group runs against the word-based test map
 * (parameterised over cache types), the second against the raw
 * byte-based map (parameterised over cache type and value endianness).
 */
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};
1448 | ||
7b7982f1 RF |
1449 | static int regmap_test_init(struct kunit *test) |
1450 | { | |
1451 | struct regmap_test_priv *priv; | |
1452 | struct device *dev; | |
1453 | ||
1454 | priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); | |
1455 | if (!priv) | |
1456 | return -ENOMEM; | |
1457 | ||
1458 | test->priv = priv; | |
1459 | ||
1460 | dev = kunit_device_register(test, "regmap_test"); | |
1461 | priv->dev = get_device(dev); | |
1462 | if (!priv->dev) | |
1463 | return -ENODEV; | |
1464 | ||
1465 | dev_set_drvdata(dev, test); | |
1466 | ||
1467 | return 0; | |
1468 | } | |
1469 | ||
1470 | static void regmap_test_exit(struct kunit *test) | |
1471 | { | |
1472 | struct regmap_test_priv *priv = test->priv; | |
1473 | ||
1474 | /* Destroy the dummy struct device */ | |
1475 | if (priv && priv->dev) | |
1476 | put_device(priv->dev); | |
1477 | } | |
1478 | ||
2238959b MB |
/* Suite definition: init/exit run around every (parameterised) case */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");