regmap: kunit: Replace a kmalloc/kfree() pair with KUnit-managed alloc
[linux-2.6-block.git] / drivers / base / regmap / regmap-kunit.c
CommitLineData
2238959b
MB
1// SPDX-License-Identifier: GPL-2.0
2//
3// regmap KUnit tests
4//
5// Copyright 2023 Arm Ltd
6
7b7982f1 7#include <kunit/device.h>
ce75e06e 8#include <kunit/resource.h>
2238959b
MB
9#include <kunit/test.h>
10#include "internal.h"
11
12#define BLOCK_TEST_SIZE 12
13
ce75e06e
RF
/* Wrap regmap_exit() so it can be deferred as a KUnit cleanup action */
KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
15
7b7982f1
RF
/* Per-test state stashed in test->priv: the device the regmaps are created on */
struct regmap_test_priv {
	struct device *dev;
};
19
48bccea9
RF
/* One parameter combination, expanded into test cases by KUNIT_ARRAY_PARAM */
struct regmap_test_param {
	enum regcache_type cache;	/* cache implementation under test */
	enum regmap_endian val_endian;	/* value endianness for the map */

	unsigned int from_reg;		/* base register the test accesses from */
};
26
2f0dbb24
MB
27static void get_changed_bytes(void *orig, void *new, size_t size)
28{
29 char *o = orig;
30 char *n = new;
31 int i;
32
33 get_random_bytes(new, size);
34
35 /*
36 * This could be nicer and more efficient but we shouldn't
37 * super care.
38 */
39 for (i = 0; i < size; i++)
40 while (n[i] == o[i])
41 get_random_bytes(&n[i], 1);
42}
43
/*
 * Base configuration shared by all the tests; each test takes a copy
 * and adapts it.  val_bits matches the unsigned int backing store used
 * by gen_regmap().
 */
static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
48
48bccea9
RF
49static const char *regcache_type_name(enum regcache_type type)
50{
51 switch (type) {
52 case REGCACHE_NONE:
53 return "none";
54 case REGCACHE_FLAT:
55 return "flat";
56 case REGCACHE_RBTREE:
57 return "rbtree";
58 case REGCACHE_MAPLE:
59 return "maple";
60 default:
61 return NULL;
62 }
63}
64
65static const char *regmap_endian_name(enum regmap_endian endian)
66{
67 switch (endian) {
68 case REGMAP_ENDIAN_BIG:
69 return "big";
70 case REGMAP_ENDIAN_LITTLE:
71 return "little";
72 case REGMAP_ENDIAN_DEFAULT:
73 return "default";
74 case REGMAP_ENDIAN_NATIVE:
75 return "native";
76 default:
77 return NULL;
78 }
79}
2238959b 80
/* Build the KUnit parameter description, e.g. "maple-default @0x2001" */
static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->from_reg);
}
88
48bccea9
RF
/* Every cache type, including running with no cache at all */
static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
2238959b 97
/* Only real cache implementations, no REGCACHE_NONE */
static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
105
/* Real cache implementations at several base registers, aligned and not */
static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
2238959b 125
/* Cache implementations that can hold sparse maps (the flat cache cannot) */
static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
2238959b 140
7b7982f1
RF
141static struct regmap *gen_regmap(struct kunit *test,
142 struct regmap_config *config,
2238959b
MB
143 struct regmap_ram_data **data)
144{
48bccea9 145 const struct regmap_test_param *param = test->param_value;
7b7982f1 146 struct regmap_test_priv *priv = test->priv;
2238959b
MB
147 unsigned int *buf;
148 struct regmap *ret;
71091574 149 size_t size;
2238959b
MB
150 int i;
151 struct reg_default *defaults;
152
48bccea9 153 config->cache_type = param->cache;
a9e26169
GR
154 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
155 config->cache_type == REGCACHE_MAPLE;
156
71091574
RF
157 if (config->max_register == 0) {
158 config->max_register = param->from_reg;
159 if (config->num_reg_defaults)
160 config->max_register += (config->num_reg_defaults - 1) *
161 config->reg_stride;
162 else
163 config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
164 }
165
166 size = (config->max_register + 1) * sizeof(unsigned int);
2238959b
MB
167 buf = kmalloc(size, GFP_KERNEL);
168 if (!buf)
169 return ERR_PTR(-ENOMEM);
170
171 get_random_bytes(buf, size);
172
173 *data = kzalloc(sizeof(**data), GFP_KERNEL);
174 if (!(*data))
175 return ERR_PTR(-ENOMEM);
176 (*data)->vals = buf;
177
178 if (config->num_reg_defaults) {
179 defaults = kcalloc(config->num_reg_defaults,
180 sizeof(struct reg_default),
181 GFP_KERNEL);
182 if (!defaults)
183 return ERR_PTR(-ENOMEM);
184 config->reg_defaults = defaults;
185
186 for (i = 0; i < config->num_reg_defaults; i++) {
71091574
RF
187 defaults[i].reg = param->from_reg + (i * config->reg_stride);
188 defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
2238959b
MB
189 }
190 }
191
7b7982f1 192 ret = regmap_init_ram(priv->dev, config, *data);
2238959b
MB
193 if (IS_ERR(ret)) {
194 kfree(buf);
195 kfree(*data);
ce75e06e
RF
196 } else {
197 kunit_add_action(test, regmap_exit_action, ret);
2238959b
MB
198 }
199
200 return ret;
201}
202
/*
 * Register access predicate allowing every register except
 * (from_reg + 5).  Assumes the test device's drvdata points at the
 * struct kunit so the current parameters can be recovered — set up by
 * the suite init outside this chunk; verify against it.
 */
static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}
210
2238959b
MB
/*
 * A value written to a register can be read straight back; with a
 * cache enabled the read must be served from the cache, not the
 * hardware.
 */
static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}
235
/*
 * Data written through the bulk API is visible to single-register
 * reads, and with a cache those reads never touch the hardware.
 */
static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
268
/*
 * Data written with single writes can be read back through the bulk
 * API, served from the cache when one is in use.
 */
static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
297
18003306
MB
/*
 * Writes to a read-only register (reg 5 via reg_5_false()) must fail
 * and must not reach the device; all other registers write normally.
 */
static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}
328
a07bff40
MB
/*
 * Reads of a write-only register (reg 5 via reg_5_false()) must fail,
 * except with the flat cache which caches everything; either way no
 * hardware read of that register may happen.
 */
static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}
364
2238959b
MB
/*
 * Register defaults supplied through num_reg_defaults are readable and,
 * with a cache, are served without touching the hardware.
 */
static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
390
/*
 * With num_reg_defaults_raw the defaults are read from the device at
 * init time (one hardware read per register for cached maps); later
 * reads must then be cache hits.
 */
static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
422
/*
 * regmap_register_patch() writes exactly the patched registers (2 and
 * 5 here) to the device and leaves all others untouched.
 */
static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}
470
/*
 * With reg_stride = 2 only even registers are accessible: odd
 * registers must fail both read and write with no hardware access,
 * even ones behave normally.
 */
static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}
509
/*
 * An indirectly accessed range: registers range_min..range_max are
 * reached through the window at window_start, paged via selector_reg.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
520
/*
 * Registers inside the paging window are volatile so accesses through
 * the window always reach the hardware.
 *
 * NOTE(review): the <= upper bound also covers register
 * window_start + window_len, i.e. one past a window_len sized window —
 * confirm whether this over-approximation is intentional before
 * tightening it.
 */
static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}
529
530static bool test_range_all_volatile(struct device *dev, unsigned int reg)
531{
532 if (test_range_window_volatile(dev, reg))
533 return true;
534
2238959b
MB
535 if (reg >= test_range.range_min && reg <= test_range.range_max)
536 return true;
537
538 return false;
539}
540
/*
 * Accesses to the virtual range must go through the window: each one
 * updates the page selector and touches the window registers, and no
 * physical access is made to the virtual register numbers themselves.
 */
static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}
607
608/* Try to stress dynamic creation of cache data structures */
609static void stress_insert(struct kunit *test)
610{
2238959b
MB
611 struct regmap *map;
612 struct regmap_config config;
613 struct regmap_ram_data *data;
614 unsigned int rval, *vals;
615 size_t buf_sz;
616 int i;
617
618 config = test_regmap_config;
2238959b
MB
619 config.max_register = 300;
620
7b7982f1 621 map = gen_regmap(test, &config, &data);
2238959b
MB
622 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
623 if (IS_ERR(map))
624 return;
625
626 vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
627 GFP_KERNEL);
628 KUNIT_ASSERT_FALSE(test, vals == NULL);
629 buf_sz = sizeof(unsigned long) * config.max_register;
630
631 get_random_bytes(vals, buf_sz);
632
633 /* Write data into the map/cache in ever decreasing strides */
634 for (i = 0; i < config.max_register; i += 100)
635 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
636 for (i = 0; i < config.max_register; i += 50)
637 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
638 for (i = 0; i < config.max_register; i += 25)
639 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
640 for (i = 0; i < config.max_register; i += 10)
641 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
642 for (i = 0; i < config.max_register; i += 5)
643 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
644 for (i = 0; i < config.max_register; i += 3)
645 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
646 for (i = 0; i < config.max_register; i += 2)
647 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
648 for (i = 0; i < config.max_register; i++)
649 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
650
651 /* Do reads from the cache (if there is one) match? */
652 for (i = 0; i < config.max_register; i ++) {
653 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
654 KUNIT_EXPECT_EQ(test, rval, vals[i]);
48bccea9 655 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
2238959b 656 }
2238959b
MB
657}
658
/*
 * Writes made while cache bypass is enabled reach the device without
 * updating the cache; once bypass is disabled the cache still returns
 * the original value.
 */
static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}
693
/*
 * After regcache_mark_dirty(), regcache_sync() must rewrite the whole
 * cached block to the device, restoring values that were trashed
 * behind the cache's back.
 */
static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}
728
7903d15f
RF
/*
 * Writes made in cache-only mode are held back from the device; a
 * plain regcache_sync() (no mark_dirty) after leaving cache-only must
 * still write them all out.
 */
static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
777
/*
 * With register defaults present, a dirty sync only writes registers
 * that differ from their default; after restoring defaults a second
 * dirty sync writes nothing at all.
 */
static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}
826
/*
 * A cache-only write away from the default must be written out by a
 * plain sync, and a later cache-only write back to the default value
 * must also be written out (the device still holds the changed value).
 */
static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
873
357a1ebd
MB
/*
 * A read-only register (reg from_reg + 5 via reg_5_false()) rejects
 * cache-only writes and is skipped by the subsequent sync; all other
 * registers sync normally.
 */
static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
911
2238959b
MB
/*
 * A registered patch is re-applied to the device by a dirty sync but
 * never enters the cache: readback returns the pre-patch values while
 * the device holds the patched ones.
 */
static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}
969
970static void cache_drop(struct kunit *test)
971{
71091574 972 const struct regmap_test_param *param = test->param_value;
2238959b
MB
973 struct regmap *map;
974 struct regmap_config config;
975 struct regmap_ram_data *data;
976 unsigned int rval[BLOCK_TEST_SIZE];
977 int i;
978
979 config = test_regmap_config;
2238959b
MB
980 config.num_reg_defaults = BLOCK_TEST_SIZE;
981
7b7982f1 982 map = gen_regmap(test, &config, &data);
2238959b
MB
983 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
984 if (IS_ERR(map))
985 return;
986
987 /* Ensure the data is read from the cache */
988 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574
RF
989 data->read[param->from_reg + i] = false;
990 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
2238959b
MB
991 BLOCK_TEST_SIZE));
992 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
71091574
RF
993 KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
994 data->read[param->from_reg + i] = false;
2238959b 995 }
71091574 996 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
2238959b
MB
997
998 /* Drop some registers */
71091574
RF
999 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
1000 param->from_reg + 5));
2238959b
MB
1001
1002 /* Reread and check only the dropped registers hit the device. */
71091574 1003 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
2238959b
MB
1004 BLOCK_TEST_SIZE));
1005 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574
RF
1006 KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
1007 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
2238959b
MB
1008}
1009
7dd52d30
RF
/* After dropping every register, a sync of a dirty-marked cache must
 * not write anything to the device: there are no entries left to sync.
 */
static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1050
/* As cache_drop_all_and_sync_marked_dirty() but without register
 * defaults and without marking the cache dirty before the sync.
 */
static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* No num_reg_defaults here, unlike the other drop-all variants */
	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1092
/* As cache_drop_all_and_sync_no_defaults() but with register defaults
 * present: dropped entries must still not be synced back, even though
 * defaults exist for them.
 */
static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1135
d881ee5a
MB
1136static void cache_present(struct kunit *test)
1137{
71091574 1138 const struct regmap_test_param *param = test->param_value;
d881ee5a
MB
1139 struct regmap *map;
1140 struct regmap_config config;
1141 struct regmap_ram_data *data;
1142 unsigned int val;
1143 int i;
1144
1145 config = test_regmap_config;
d881ee5a 1146
7b7982f1 1147 map = gen_regmap(test, &config, &data);
d881ee5a
MB
1148 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1149 if (IS_ERR(map))
1150 return;
1151
1152 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1153 data->read[param->from_reg + i] = false;
d881ee5a
MB
1154
1155 /* No defaults so no registers cached. */
1156 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1157 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
d881ee5a
MB
1158
1159 /* We didn't trigger any reads */
1160 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1161 KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
d881ee5a
MB
1162
1163 /* Fill the cache */
1164 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1165 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
d881ee5a
MB
1166
1167 /* Now everything should be cached */
1168 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1169 KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
d881ee5a
MB
1170}
1171
6a2e332c
MB
/* Check that caching the window selector register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range, leaving the last page selected */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register: the selector must switch to the last page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}
1222
48bccea9
RF
/* Parameters for the raw I/O tests: every cache type (including no
 * cache at all) crossed with both value endians.
 */
static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

/* As raw_types_list but only real caches, for tests that need caching */
static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

/* Template config for the raw tests: 16-bit registers and values */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
1254
7b7982f1
RF
1255static struct regmap *gen_raw_regmap(struct kunit *test,
1256 struct regmap_config *config,
155a6bd6
MB
1257 struct regmap_ram_data **data)
1258{
7b7982f1 1259 struct regmap_test_priv *priv = test->priv;
48bccea9 1260 const struct regmap_test_param *param = test->param_value;
155a6bd6
MB
1261 u16 *buf;
1262 struct regmap *ret;
1263 size_t size = (config->max_register + 1) * config->reg_bits / 8;
1264 int i;
1265 struct reg_default *defaults;
1266
48bccea9
RF
1267 config->cache_type = param->cache;
1268 config->val_format_endian = param->val_endian;
a9e26169
GR
1269 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
1270 config->cache_type == REGCACHE_MAPLE;
155a6bd6
MB
1271
1272 buf = kmalloc(size, GFP_KERNEL);
1273 if (!buf)
1274 return ERR_PTR(-ENOMEM);
1275
1276 get_random_bytes(buf, size);
1277
1278 *data = kzalloc(sizeof(**data), GFP_KERNEL);
1279 if (!(*data))
1280 return ERR_PTR(-ENOMEM);
1281 (*data)->vals = (void *)buf;
1282
1283 config->num_reg_defaults = config->max_register + 1;
1284 defaults = kcalloc(config->num_reg_defaults,
1285 sizeof(struct reg_default),
1286 GFP_KERNEL);
1287 if (!defaults)
1288 return ERR_PTR(-ENOMEM);
1289 config->reg_defaults = defaults;
1290
1291 for (i = 0; i < config->num_reg_defaults; i++) {
1292 defaults[i].reg = i;
48bccea9 1293 switch (param->val_endian) {
155a6bd6
MB
1294 case REGMAP_ENDIAN_LITTLE:
1295 defaults[i].def = le16_to_cpu(buf[i]);
1296 break;
1297 case REGMAP_ENDIAN_BIG:
1298 defaults[i].def = be16_to_cpu(buf[i]);
1299 break;
1300 default:
1301 return ERR_PTR(-EINVAL);
1302 }
1303 }
1304
1305 /*
1306 * We use the defaults in the tests but they don't make sense
1307 * to the core if there's no cache.
1308 */
1309 if (config->cache_type == REGCACHE_NONE)
1310 config->num_reg_defaults = 0;
1311
7b7982f1 1312 ret = regmap_init_raw_ram(priv->dev, config, *data);
155a6bd6
MB
1313 if (IS_ERR(ret)) {
1314 kfree(buf);
1315 kfree(*data);
ce75e06e
RF
1316 } else {
1317 kunit_add_action(test, regmap_exit_action, ret);
155a6bd6
MB
1318 }
1319
1320 return ret;
1321}
1322
1323static void raw_read_defaults_single(struct kunit *test)
1324{
155a6bd6
MB
1325 struct regmap *map;
1326 struct regmap_config config;
1327 struct regmap_ram_data *data;
1328 unsigned int rval;
1329 int i;
1330
1331 config = raw_regmap_config;
1332
48bccea9 1333 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1334 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1335 if (IS_ERR(map))
1336 return;
1337
1338 /* Check that we can read the defaults via the API */
1339 for (i = 0; i < config.max_register + 1; i++) {
1340 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1341 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1342 }
155a6bd6
MB
1343}
1344
/* The defaults must also be readable via a raw bulk read, appearing in
 * the wire format selected by the endian parameter.
 */
static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		/* Raw data is in wire endian; convert before comparing */
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}
1379
1380static void raw_write_read_single(struct kunit *test)
1381{
155a6bd6
MB
1382 struct regmap *map;
1383 struct regmap_config config;
1384 struct regmap_ram_data *data;
1385 u16 val;
1386 unsigned int rval;
1387
1388 config = raw_regmap_config;
1389
48bccea9 1390 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1391 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1392 if (IS_ERR(map))
1393 return;
1394
1395 get_random_bytes(&val, sizeof(val));
1396
1397 /* If we write a value to a register we can read it back */
1398 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1399 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1400 KUNIT_EXPECT_EQ(test, val, rval);
155a6bd6
MB
1401}
1402
/* A raw (wire-format) write must land in both the regmap view and the
 * backing "hardware" buffer, leaving other registers at their defaults.
 */
static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write of two registers starting at register 2 */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Raw payload is wire endian; convert for comparison */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}
1451
d958d978
BW
/* regmap_config predicate: matches only register 0 */
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}
1456
/* RAM-backend predicate: matches only register 0 */
static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}
1461
1462static void raw_noinc_write(struct kunit *test)
1463{
d958d978
BW
1464 struct regmap *map;
1465 struct regmap_config config;
1466 struct regmap_ram_data *data;
7011b51f
BW
1467 unsigned int val;
1468 u16 val_test, val_last;
d958d978
BW
1469 u16 val_array[BLOCK_TEST_SIZE];
1470
1471 config = raw_regmap_config;
1472 config.volatile_reg = reg_zero;
1473 config.writeable_noinc_reg = reg_zero;
1474 config.readable_noinc_reg = reg_zero;
1475
48bccea9 1476 map = gen_raw_regmap(test, &config, &data);
d958d978
BW
1477 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1478 if (IS_ERR(map))
1479 return;
1480
1481 data->noinc_reg = ram_reg_zero;
1482
1483 get_random_bytes(&val_array, sizeof(val_array));
1484
1485 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1486 val_test = be16_to_cpu(val_array[1]) + 100;
1487 val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
1488 } else {
1489 val_test = le16_to_cpu(val_array[1]) + 100;
1490 val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
1491 }
1492
1493 /* Put some data into the register following the noinc register */
1494 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
1495
1496 /* Write some data to the noinc register */
1497 KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
1498 sizeof(val_array)));
1499
1500 /* We should read back the last value written */
1501 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
1502 KUNIT_ASSERT_EQ(test, val_last, val);
1503
1504 /* Make sure we didn't touch the register after the noinc register */
1505 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
1506 KUNIT_ASSERT_EQ(test, val_test, val);
d958d978
BW
1507}
1508
155a6bd6
MB
/* Writes made in cache-only mode must stay out of the "hardware" until
 * regcache_sync() pushes them, for both raw and normal writes.
 */
static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* Pick values guaranteed to differ from the current hardware bytes */
	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Raw-written registers: payload is wire endian */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			/* regmap_write() took a CPU-endian value */
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}
1583
bb92804b
MB
/* Accesses to a virtual range must go through the selector register and
 * window rather than touching the virtual addresses directly.
 */
static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same check one window length further in (second page) */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}
1645
2238959b
MB
static struct kunit_case regmap_test_cases[] = {
	/* Basic API tests, run against all cache types */
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	/* Cache behaviour tests, needing a real cache */
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	/* Sparse-cache tests, run at several base registers */
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	/* Raw (wire-format) I/O tests */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};
1681
7b7982f1
RF
1682static int regmap_test_init(struct kunit *test)
1683{
1684 struct regmap_test_priv *priv;
1685 struct device *dev;
1686
1687 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
1688 if (!priv)
1689 return -ENOMEM;
1690
1691 test->priv = priv;
1692
1693 dev = kunit_device_register(test, "regmap_test");
1694 priv->dev = get_device(dev);
1695 if (!priv->dev)
1696 return -ENODEV;
1697
1698 dev_set_drvdata(dev, test);
1699
1700 return 0;
1701}
1702
/* Per-test teardown: drop the reference taken on the dummy device. */
static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Destroy the dummy struct device */
	if (priv && priv->dev)
		put_device(priv->dev);
}
1711
2238959b
MB
/* KUnit suite definition tying the cases to per-test init/exit */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");