regmap: kunit: Add cache-drop test with multiple cache blocks
[linux-2.6-block.git] / drivers / base / regmap / regmap-kunit.c
CommitLineData
2238959b
MB
1// SPDX-License-Identifier: GPL-2.0
2//
3// regmap KUnit tests
4//
5// Copyright 2023 Arm Ltd
6
7b7982f1 7#include <kunit/device.h>
ce75e06e 8#include <kunit/resource.h>
2238959b
MB
9#include <kunit/test.h>
10#include "internal.h"
11
12#define BLOCK_TEST_SIZE 12
13
ce75e06e
RF
/* Wrap regmap_exit() so it can be queued as a kunit deferred cleanup action. */
KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
15
7b7982f1
RF
/* Per-test private state: the dummy device the generated regmap binds to. */
struct regmap_test_priv {
	struct device *dev;
};
19
48bccea9
RF
/* One parameterisation of a test case: cache backend, endianness, base register. */
struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;	/* first register of the block under test */
};
26
2f0dbb24
MB
27static void get_changed_bytes(void *orig, void *new, size_t size)
28{
29 char *o = orig;
30 char *n = new;
31 int i;
32
33 get_random_bytes(new, size);
34
35 /*
36 * This could be nicer and more efficient but we shouldn't
37 * super care.
38 */
39 for (i = 0; i < size; i++)
40 while (n[i] == o[i])
41 get_random_bytes(&n[i], 1);
42}
43
/* Baseline regmap configuration; each test copies and customises it. */
static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,	/* values fill a whole unsigned int */
};
48
48bccea9
RF
49static const char *regcache_type_name(enum regcache_type type)
50{
51 switch (type) {
52 case REGCACHE_NONE:
53 return "none";
54 case REGCACHE_FLAT:
55 return "flat";
56 case REGCACHE_RBTREE:
57 return "rbtree";
58 case REGCACHE_MAPLE:
59 return "maple";
60 default:
61 return NULL;
62 }
63}
64
65static const char *regmap_endian_name(enum regmap_endian endian)
66{
67 switch (endian) {
68 case REGMAP_ENDIAN_BIG:
69 return "big";
70 case REGMAP_ENDIAN_LITTLE:
71 return "little";
72 case REGMAP_ENDIAN_DEFAULT:
73 return "default";
74 case REGMAP_ENDIAN_NATIVE:
75 return "native";
76 default:
77 return NULL;
78 }
79}
2238959b 80
48bccea9 81static void param_to_desc(const struct regmap_test_param *param, char *desc)
2238959b 82{
71091574 83 snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
48bccea9 84 regcache_type_name(param->cache),
71091574
RF
85 regmap_endian_name(param->val_endian),
86 param->from_reg);
2238959b
MB
87}
88
48bccea9
RF
/* Every cache type including no cache at all. */
static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
2238959b 97
/* Only actual caches, all starting at register 0. */
static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
105
/* Actual caches at several base registers, including unaligned offsets. */
static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
2238959b 125
/* Caches that support sparse register maps (flat cache excluded). */
static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
2238959b 140
7b7982f1
RF
141static struct regmap *gen_regmap(struct kunit *test,
142 struct regmap_config *config,
2238959b
MB
143 struct regmap_ram_data **data)
144{
48bccea9 145 const struct regmap_test_param *param = test->param_value;
7b7982f1 146 struct regmap_test_priv *priv = test->priv;
2238959b
MB
147 unsigned int *buf;
148 struct regmap *ret;
71091574 149 size_t size;
2238959b
MB
150 int i;
151 struct reg_default *defaults;
152
48bccea9 153 config->cache_type = param->cache;
a9e26169
GR
154 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
155 config->cache_type == REGCACHE_MAPLE;
156
71091574
RF
157 if (config->max_register == 0) {
158 config->max_register = param->from_reg;
159 if (config->num_reg_defaults)
160 config->max_register += (config->num_reg_defaults - 1) *
161 config->reg_stride;
162 else
163 config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
164 }
165
166 size = (config->max_register + 1) * sizeof(unsigned int);
2238959b
MB
167 buf = kmalloc(size, GFP_KERNEL);
168 if (!buf)
169 return ERR_PTR(-ENOMEM);
170
171 get_random_bytes(buf, size);
172
173 *data = kzalloc(sizeof(**data), GFP_KERNEL);
174 if (!(*data))
175 return ERR_PTR(-ENOMEM);
176 (*data)->vals = buf;
177
178 if (config->num_reg_defaults) {
179 defaults = kcalloc(config->num_reg_defaults,
180 sizeof(struct reg_default),
181 GFP_KERNEL);
182 if (!defaults)
183 return ERR_PTR(-ENOMEM);
184 config->reg_defaults = defaults;
185
186 for (i = 0; i < config->num_reg_defaults; i++) {
71091574
RF
187 defaults[i].reg = param->from_reg + (i * config->reg_stride);
188 defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
2238959b
MB
189 }
190 }
191
7b7982f1 192 ret = regmap_init_ram(priv->dev, config, *data);
2238959b
MB
193 if (IS_ERR(ret)) {
194 kfree(buf);
195 kfree(*data);
ce75e06e
RF
196 } else {
197 kunit_add_action(test, regmap_exit_action, ret);
2238959b
MB
198 }
199
200 return ret;
201}
202
ac4394bf 203static bool reg_5_false(struct device *dev, unsigned int reg)
18003306 204{
ac4394bf
RF
205 struct kunit *test = dev_get_drvdata(dev);
206 const struct regmap_test_param *param = test->param_value;
207
208 return reg != (param->from_reg + 5);
18003306
MB
209}
210
2238959b
MB
211static void basic_read_write(struct kunit *test)
212{
2238959b
MB
213 struct regmap *map;
214 struct regmap_config config;
215 struct regmap_ram_data *data;
216 unsigned int val, rval;
217
218 config = test_regmap_config;
2238959b 219
7b7982f1 220 map = gen_regmap(test, &config, &data);
2238959b
MB
221 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
222 if (IS_ERR(map))
223 return;
224
225 get_random_bytes(&val, sizeof(val));
226
227 /* If we write a value to a register we can read it back */
228 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
229 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
230 KUNIT_EXPECT_EQ(test, val, rval);
231
232 /* If using a cache the cache satisfied the read */
48bccea9 233 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
2238959b
MB
234}
235
236static void bulk_write(struct kunit *test)
237{
2238959b
MB
238 struct regmap *map;
239 struct regmap_config config;
240 struct regmap_ram_data *data;
241 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
242 int i;
243
244 config = test_regmap_config;
2238959b 245
7b7982f1 246 map = gen_regmap(test, &config, &data);
2238959b
MB
247 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
248 if (IS_ERR(map))
249 return;
250
251 get_random_bytes(&val, sizeof(val));
252
253 /*
254 * Data written via the bulk API can be read back with single
255 * reads.
256 */
257 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
258 BLOCK_TEST_SIZE));
259 for (i = 0; i < BLOCK_TEST_SIZE; i++)
260 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
261
262 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
263
264 /* If using a cache the cache satisfied the read */
265 for (i = 0; i < BLOCK_TEST_SIZE; i++)
48bccea9 266 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
2238959b
MB
267}
268
269static void bulk_read(struct kunit *test)
270{
2238959b
MB
271 struct regmap *map;
272 struct regmap_config config;
273 struct regmap_ram_data *data;
274 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
275 int i;
276
277 config = test_regmap_config;
2238959b 278
7b7982f1 279 map = gen_regmap(test, &config, &data);
2238959b
MB
280 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
281 if (IS_ERR(map))
282 return;
283
284 get_random_bytes(&val, sizeof(val));
285
286 /* Data written as single writes can be read via the bulk API */
287 for (i = 0; i < BLOCK_TEST_SIZE; i++)
288 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
289 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
290 BLOCK_TEST_SIZE));
291 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
292
293 /* If using a cache the cache satisfied the read */
294 for (i = 0; i < BLOCK_TEST_SIZE; i++)
48bccea9 295 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
2238959b
MB
296}
297
18003306
MB
/* Writes to a read-only register must fail and never reach the device. */
static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;	/* register 5 is read-only */

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}
328
a07bff40
MB
/* Reads of a write-only register must fail without touching the hardware. */
static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;	/* register 5 is write-only */

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			/* The flat cache answers for every register */
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}
364
2238959b
MB
/* Explicit register defaults are visible on readback, served by the cache. */
static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
390
/*
 * With num_reg_defaults_raw the defaults are read from the device at init
 * time; subsequent reads must then be served from the cache.
 */
static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
422
/* A registered patch writes exactly the patched registers to the device. */
static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}
470
/* With reg_stride = 2 only even registers are accessible. */
static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			/* Odd registers: access must fail, hardware untouched */
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}
509
/*
 * Indirect-access range: virtual registers 20-40 are reached through a
 * 10-register window at 4, paged via the selector at register 1.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
520
/*
 * Mark the physical access window itself as volatile.
 * NOTE(review): the upper bound uses <=, which also covers the register
 * one past window_start + window_len - 1 — confirm this is intentional.
 */
static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}
529
530static bool test_range_all_volatile(struct device *dev, unsigned int reg)
531{
532 if (test_range_window_volatile(dev, reg))
533 return true;
534
2238959b
MB
535 if (reg >= test_range.range_min && reg <= test_range.range_max)
536 return true;
537
538 return false;
539}
540
/*
 * Accesses to the virtual range must go through the selector register and
 * the access window, never directly to the virtual register numbers.
 */
static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same again for a register on the second page */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}
607
608/* Try to stress dynamic creation of cache data structures */
609static void stress_insert(struct kunit *test)
610{
2238959b
MB
611 struct regmap *map;
612 struct regmap_config config;
613 struct regmap_ram_data *data;
614 unsigned int rval, *vals;
615 size_t buf_sz;
616 int i;
617
618 config = test_regmap_config;
2238959b
MB
619 config.max_register = 300;
620
7b7982f1 621 map = gen_regmap(test, &config, &data);
2238959b
MB
622 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
623 if (IS_ERR(map))
624 return;
625
626 vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
627 GFP_KERNEL);
628 KUNIT_ASSERT_FALSE(test, vals == NULL);
629 buf_sz = sizeof(unsigned long) * config.max_register;
630
631 get_random_bytes(vals, buf_sz);
632
633 /* Write data into the map/cache in ever decreasing strides */
634 for (i = 0; i < config.max_register; i += 100)
635 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
636 for (i = 0; i < config.max_register; i += 50)
637 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
638 for (i = 0; i < config.max_register; i += 25)
639 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
640 for (i = 0; i < config.max_register; i += 10)
641 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
642 for (i = 0; i < config.max_register; i += 5)
643 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
644 for (i = 0; i < config.max_register; i += 3)
645 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
646 for (i = 0; i < config.max_register; i += 2)
647 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
648 for (i = 0; i < config.max_register; i++)
649 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
650
651 /* Do reads from the cache (if there is one) match? */
652 for (i = 0; i < config.max_register; i ++) {
653 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
654 KUNIT_EXPECT_EQ(test, rval, vals[i]);
48bccea9 655 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
2238959b 656 }
2238959b
MB
657}
658
659static void cache_bypass(struct kunit *test)
660{
ac4394bf 661 const struct regmap_test_param *param = test->param_value;
2238959b
MB
662 struct regmap *map;
663 struct regmap_config config;
664 struct regmap_ram_data *data;
665 unsigned int val, rval;
666
667 config = test_regmap_config;
2238959b 668
7b7982f1 669 map = gen_regmap(test, &config, &data);
2238959b
MB
670 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
671 if (IS_ERR(map))
672 return;
673
674 get_random_bytes(&val, sizeof(val));
675
676 /* Ensure the cache has a value in it */
ac4394bf 677 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
2238959b
MB
678
679 /* Bypass then write a different value */
680 regcache_cache_bypass(map, true);
ac4394bf 681 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
2238959b
MB
682
683 /* Read the bypassed value */
ac4394bf 684 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
2238959b 685 KUNIT_EXPECT_EQ(test, val + 1, rval);
ac4394bf 686 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
2238959b
MB
687
688 /* Disable bypass, the cache should still return the original value */
689 regcache_cache_bypass(map, false);
ac4394bf 690 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
2238959b 691 KUNIT_EXPECT_EQ(test, val, rval);
2238959b
MB
692}
693
/* After regcache_mark_dirty() a sync rewrites every cached register. */
static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/*
	 * Trash the data on the device itself then resync.
	 * NOTE(review): the memset clears vals[0..] rather than
	 * vals[from_reg..] — when from_reg != 0 the block under test is
	 * not actually trashed; confirm this is intended.
	 */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}
728
7903d15f
RF
/* Writes made while in cache-only mode are flushed by a later sync. */
static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;	/* guaranteed different, within val_bits */

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
777
/* Sync after mark-dirty skips registers that sit at their default value. */
static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}
826
/*
 * A register changed in cache-only mode is written by sync, including when
 * it is later changed back to its default value (no mark-dirty involved).
 */
static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
873
357a1ebd
MB
/* Cache-only writes to a read-only register must not be synced out. */
static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;	/* from_reg + 5 is read-only */

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
911
2238959b
MB
/* A register patch is applied to the device on sync but bypasses the cache. */
static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);	/* cache reads stay unpatched */

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}
969
970static void cache_drop(struct kunit *test)
971{
71091574 972 const struct regmap_test_param *param = test->param_value;
2238959b
MB
973 struct regmap *map;
974 struct regmap_config config;
975 struct regmap_ram_data *data;
976 unsigned int rval[BLOCK_TEST_SIZE];
977 int i;
978
979 config = test_regmap_config;
2238959b
MB
980 config.num_reg_defaults = BLOCK_TEST_SIZE;
981
7b7982f1 982 map = gen_regmap(test, &config, &data);
2238959b
MB
983 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
984 if (IS_ERR(map))
985 return;
986
987 /* Ensure the data is read from the cache */
988 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574
RF
989 data->read[param->from_reg + i] = false;
990 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
2238959b
MB
991 BLOCK_TEST_SIZE));
992 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
71091574
RF
993 KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
994 data->read[param->from_reg + i] = false;
2238959b 995 }
71091574 996 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
2238959b
MB
997
998 /* Drop some registers */
71091574
RF
999 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
1000 param->from_reg + 5));
2238959b
MB
1001
1002 /* Reread and check only the dropped registers hit the device. */
71091574 1003 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
2238959b
MB
1004 BLOCK_TEST_SIZE));
1005 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574
RF
1006 KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
1007 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
2238959b
MB
1008}
1009
468d277e
RF
1010static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
1011{
1012 const struct regmap_test_param *param = test->param_value;
1013 struct regmap *map;
1014 struct regmap_config config;
1015 struct regmap_ram_data *data;
1016 unsigned int val[4][BLOCK_TEST_SIZE];
1017 unsigned int reg;
1018 const int num_ranges = ARRAY_SIZE(val) * 2;
1019 int rangeidx, i;
1020
1021 static_assert(ARRAY_SIZE(val) == 4);
1022
1023 config = test_regmap_config;
1024 config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);
1025
1026 map = gen_regmap(test, &config, &data);
1027 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1028 if (IS_ERR(map))
1029 return;
1030
1031 for (i = 0; i < config.max_register + 1; i++)
1032 data->written[i] = false;
1033
1034 /* Create non-contiguous cache blocks by writing every other range */
1035 get_random_bytes(&val, sizeof(val));
1036 for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
1037 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
1038 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
1039 &val[rangeidx / 2],
1040 BLOCK_TEST_SIZE));
1041 KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
1042 &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
1043 }
1044
1045 /* Check that odd ranges weren't written */
1046 for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
1047 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
1048 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1049 KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
1050 }
1051
1052 /* Drop range 2 */
1053 reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
1054 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));
1055
1056 /* Drop part of range 4 */
1057 reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
1058 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
1059
1060 /* Mark dirty and reset mock registers to 0 */
1061 regcache_mark_dirty(map);
1062 for (i = 0; i < config.max_register + 1; i++) {
1063 data->vals[i] = 0;
1064 data->written[i] = false;
1065 }
1066
1067 /* The registers that were dropped from range 4 should now remain at 0 */
1068 val[4 / 2][3] = 0;
1069 val[4 / 2][4] = 0;
1070 val[4 / 2][5] = 0;
1071
1072 /* Sync and check that the expected register ranges were written */
1073 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1074
1075 /* Check that odd ranges weren't written */
1076 for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
1077 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
1078 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1079 KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
1080 }
1081
1082 /* Check that even ranges (except 2 and 4) were written */
1083 for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
1084 if ((rangeidx == 2) || (rangeidx == 4))
1085 continue;
1086
1087 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
1088 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1089 KUNIT_EXPECT_TRUE(test, data->written[reg + i]);
1090
1091 KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
1092 &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
1093 }
1094
1095 /* Check that range 2 wasn't written */
1096 reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
1097 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1098 KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
1099
1100 /* Check that range 4 was partially written */
1101 reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
1102 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1103 KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);
1104
1105 KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));
1106
1107 /* Nothing before param->from_reg should have been written */
1108 for (i = 0; i < param->from_reg; i++)
1109 KUNIT_EXPECT_FALSE(test, data->written[i]);
1110}
1111
7dd52d30
RF
1112static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
1113{
1114 const struct regmap_test_param *param = test->param_value;
1115 struct regmap *map;
1116 struct regmap_config config;
1117 struct regmap_ram_data *data;
1118 unsigned int rval[BLOCK_TEST_SIZE];
1119 int i;
1120
1121 config = test_regmap_config;
1122 config.num_reg_defaults = BLOCK_TEST_SIZE;
1123
1124 map = gen_regmap(test, &config, &data);
1125 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1126 if (IS_ERR(map))
1127 return;
1128
1129 /* Ensure the data is read from the cache */
1130 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1131 data->read[param->from_reg + i] = false;
1132 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
1133 BLOCK_TEST_SIZE));
1134 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
1135
1136 /* Change all values in cache from defaults */
1137 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1138 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
1139
1140 /* Drop all registers */
1141 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
1142
1143 /* Mark dirty and cache sync should not write anything. */
1144 regcache_mark_dirty(map);
1145 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1146 data->written[param->from_reg + i] = false;
1147
1148 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1149 for (i = 0; i <= config.max_register; i++)
1150 KUNIT_EXPECT_FALSE(test, data->written[i]);
7dd52d30
RF
1151}
1152
1153static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
1154{
1155 const struct regmap_test_param *param = test->param_value;
1156 struct regmap *map;
1157 struct regmap_config config;
1158 struct regmap_ram_data *data;
1159 unsigned int rval[BLOCK_TEST_SIZE];
1160 int i;
1161
1162 config = test_regmap_config;
1163
1164 map = gen_regmap(test, &config, &data);
1165 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1166 if (IS_ERR(map))
1167 return;
1168
1169 /* Ensure the data is read from the cache */
1170 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1171 data->read[param->from_reg + i] = false;
1172 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
1173 BLOCK_TEST_SIZE));
1174 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
1175
1176 /* Change all values in cache */
1177 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1178 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
1179
1180 /* Drop all registers */
1181 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
1182
1183 /*
1184 * Sync cache without marking it dirty. All registers were dropped
1185 * so the cache should not have any entries to write out.
1186 */
1187 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1188 data->written[param->from_reg + i] = false;
1189
1190 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1191 for (i = 0; i <= config.max_register; i++)
1192 KUNIT_EXPECT_FALSE(test, data->written[i]);
7dd52d30
RF
1193}
1194
1195static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
1196{
1197 const struct regmap_test_param *param = test->param_value;
1198 struct regmap *map;
1199 struct regmap_config config;
1200 struct regmap_ram_data *data;
1201 unsigned int rval[BLOCK_TEST_SIZE];
1202 int i;
1203
1204 config = test_regmap_config;
1205 config.num_reg_defaults = BLOCK_TEST_SIZE;
1206
1207 map = gen_regmap(test, &config, &data);
1208 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1209 if (IS_ERR(map))
1210 return;
1211
1212 /* Ensure the data is read from the cache */
1213 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1214 data->read[param->from_reg + i] = false;
1215 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
1216 BLOCK_TEST_SIZE));
1217 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
1218
1219 /* Change all values in cache from defaults */
1220 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1221 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
1222
1223 /* Drop all registers */
1224 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
1225
1226 /*
1227 * Sync cache without marking it dirty. All registers were dropped
1228 * so the cache should not have any entries to write out.
1229 */
1230 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1231 data->written[param->from_reg + i] = false;
1232
1233 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1234 for (i = 0; i <= config.max_register; i++)
1235 KUNIT_EXPECT_FALSE(test, data->written[i]);
7dd52d30
RF
1236}
1237
d881ee5a
MB
1238static void cache_present(struct kunit *test)
1239{
71091574 1240 const struct regmap_test_param *param = test->param_value;
d881ee5a
MB
1241 struct regmap *map;
1242 struct regmap_config config;
1243 struct regmap_ram_data *data;
1244 unsigned int val;
1245 int i;
1246
1247 config = test_regmap_config;
d881ee5a 1248
7b7982f1 1249 map = gen_regmap(test, &config, &data);
d881ee5a
MB
1250 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1251 if (IS_ERR(map))
1252 return;
1253
1254 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1255 data->read[param->from_reg + i] = false;
d881ee5a
MB
1256
1257 /* No defaults so no registers cached. */
1258 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1259 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
d881ee5a
MB
1260
1261 /* We didn't trigger any reads */
1262 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1263 KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
d881ee5a
MB
1264
1265 /* Fill the cache */
1266 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1267 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
d881ee5a
MB
1268
1269 /* Now everything should be cached */
1270 for (i = 0; i < BLOCK_TEST_SIZE; i++)
71091574 1271 KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
d881ee5a
MB
1272}
1273
6a2e332c
MB
1274/* Check that caching the window register works with sync */
1275static void cache_range_window_reg(struct kunit *test)
1276{
6a2e332c
MB
1277 struct regmap *map;
1278 struct regmap_config config;
1279 struct regmap_ram_data *data;
1280 unsigned int val;
1281 int i;
1282
1283 config = test_regmap_config;
6a2e332c
MB
1284 config.volatile_reg = test_range_window_volatile;
1285 config.ranges = &test_range;
1286 config.num_ranges = 1;
1287 config.max_register = test_range.range_max;
1288
7b7982f1 1289 map = gen_regmap(test, &config, &data);
6a2e332c
MB
1290 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1291 if (IS_ERR(map))
1292 return;
1293
1294 /* Write new values to the entire range */
1295 for (i = test_range.range_min; i <= test_range.range_max; i++)
1296 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
1297
1298 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1299 KUNIT_ASSERT_EQ(test, val, 2);
1300
1301 /* Write to the first register in the range to reset the page */
1302 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1303 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1304 KUNIT_ASSERT_EQ(test, val, 0);
1305
1306 /* Trigger a cache sync */
1307 regcache_mark_dirty(map);
1308 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
1309
1310 /* Write to the first register again, the page should be reset */
1311 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1312 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1313 KUNIT_ASSERT_EQ(test, val, 0);
1314
1315 /* Trigger another cache sync */
1316 regcache_mark_dirty(map);
1317 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
1318
1319 /* Write to the last register again, the page should be reset */
1320 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
1321 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1322 KUNIT_ASSERT_EQ(test, val, 2);
1323}
1324
48bccea9
RF
1325static const struct regmap_test_param raw_types_list[] = {
1326 { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
1327 { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
1328 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
1329 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
1330 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
1331 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
1332 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
1333 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
155a6bd6
MB
1334};
1335
48bccea9 1336KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
155a6bd6 1337
48bccea9
RF
1338static const struct regmap_test_param raw_cache_types_list[] = {
1339 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
1340 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
1341 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
1342 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
1343 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
1344 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
155a6bd6
MB
1345};
1346
48bccea9 1347KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
155a6bd6
MB
1348
1349static const struct regmap_config raw_regmap_config = {
1350 .max_register = BLOCK_TEST_SIZE,
1351
1352 .reg_format_endian = REGMAP_ENDIAN_LITTLE,
1353 .reg_bits = 16,
1354 .val_bits = 16,
1355};
1356
7b7982f1
RF
1357static struct regmap *gen_raw_regmap(struct kunit *test,
1358 struct regmap_config *config,
155a6bd6
MB
1359 struct regmap_ram_data **data)
1360{
7b7982f1 1361 struct regmap_test_priv *priv = test->priv;
48bccea9 1362 const struct regmap_test_param *param = test->param_value;
155a6bd6
MB
1363 u16 *buf;
1364 struct regmap *ret;
1365 size_t size = (config->max_register + 1) * config->reg_bits / 8;
1366 int i;
1367 struct reg_default *defaults;
1368
48bccea9
RF
1369 config->cache_type = param->cache;
1370 config->val_format_endian = param->val_endian;
a9e26169
GR
1371 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
1372 config->cache_type == REGCACHE_MAPLE;
155a6bd6
MB
1373
1374 buf = kmalloc(size, GFP_KERNEL);
1375 if (!buf)
1376 return ERR_PTR(-ENOMEM);
1377
1378 get_random_bytes(buf, size);
1379
1380 *data = kzalloc(sizeof(**data), GFP_KERNEL);
1381 if (!(*data))
1382 return ERR_PTR(-ENOMEM);
1383 (*data)->vals = (void *)buf;
1384
1385 config->num_reg_defaults = config->max_register + 1;
1386 defaults = kcalloc(config->num_reg_defaults,
1387 sizeof(struct reg_default),
1388 GFP_KERNEL);
1389 if (!defaults)
1390 return ERR_PTR(-ENOMEM);
1391 config->reg_defaults = defaults;
1392
1393 for (i = 0; i < config->num_reg_defaults; i++) {
1394 defaults[i].reg = i;
48bccea9 1395 switch (param->val_endian) {
155a6bd6
MB
1396 case REGMAP_ENDIAN_LITTLE:
1397 defaults[i].def = le16_to_cpu(buf[i]);
1398 break;
1399 case REGMAP_ENDIAN_BIG:
1400 defaults[i].def = be16_to_cpu(buf[i]);
1401 break;
1402 default:
1403 return ERR_PTR(-EINVAL);
1404 }
1405 }
1406
1407 /*
1408 * We use the defaults in the tests but they don't make sense
1409 * to the core if there's no cache.
1410 */
1411 if (config->cache_type == REGCACHE_NONE)
1412 config->num_reg_defaults = 0;
1413
7b7982f1 1414 ret = regmap_init_raw_ram(priv->dev, config, *data);
155a6bd6
MB
1415 if (IS_ERR(ret)) {
1416 kfree(buf);
1417 kfree(*data);
ce75e06e
RF
1418 } else {
1419 kunit_add_action(test, regmap_exit_action, ret);
155a6bd6
MB
1420 }
1421
1422 return ret;
1423}
1424
1425static void raw_read_defaults_single(struct kunit *test)
1426{
155a6bd6
MB
1427 struct regmap *map;
1428 struct regmap_config config;
1429 struct regmap_ram_data *data;
1430 unsigned int rval;
1431 int i;
1432
1433 config = raw_regmap_config;
1434
48bccea9 1435 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1436 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1437 if (IS_ERR(map))
1438 return;
1439
1440 /* Check that we can read the defaults via the API */
1441 for (i = 0; i < config.max_register + 1; i++) {
1442 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1443 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1444 }
155a6bd6
MB
1445}
1446
1447static void raw_read_defaults(struct kunit *test)
1448{
155a6bd6
MB
1449 struct regmap *map;
1450 struct regmap_config config;
1451 struct regmap_ram_data *data;
1452 u16 *rval;
1453 u16 def;
1454 size_t val_len;
1455 int i;
1456
1457 config = raw_regmap_config;
1458
48bccea9 1459 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1460 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1461 if (IS_ERR(map))
1462 return;
1463
1464 val_len = sizeof(*rval) * (config.max_register + 1);
d6f2fd7a 1465 rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
155a6bd6
MB
1466 KUNIT_ASSERT_TRUE(test, rval != NULL);
1467 if (!rval)
1468 return;
7b7982f1 1469
155a6bd6
MB
1470 /* Check that we can read the defaults via the API */
1471 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1472 for (i = 0; i < config.max_register + 1; i++) {
1473 def = config.reg_defaults[i].def;
1474 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
866f7021 1475 KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
155a6bd6 1476 } else {
866f7021 1477 KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
155a6bd6
MB
1478 }
1479 }
155a6bd6
MB
1480}
1481
1482static void raw_write_read_single(struct kunit *test)
1483{
155a6bd6
MB
1484 struct regmap *map;
1485 struct regmap_config config;
1486 struct regmap_ram_data *data;
1487 u16 val;
1488 unsigned int rval;
1489
1490 config = raw_regmap_config;
1491
48bccea9 1492 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1493 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1494 if (IS_ERR(map))
1495 return;
1496
1497 get_random_bytes(&val, sizeof(val));
1498
1499 /* If we write a value to a register we can read it back */
1500 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1501 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1502 KUNIT_EXPECT_EQ(test, val, rval);
155a6bd6
MB
1503}
1504
1505static void raw_write(struct kunit *test)
1506{
155a6bd6
MB
1507 struct regmap *map;
1508 struct regmap_config config;
1509 struct regmap_ram_data *data;
1510 u16 *hw_buf;
1511 u16 val[2];
1512 unsigned int rval;
1513 int i;
1514
1515 config = raw_regmap_config;
1516
48bccea9 1517 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1518 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1519 if (IS_ERR(map))
1520 return;
1521
1522 hw_buf = (u16 *)data->vals;
1523
1524 get_random_bytes(&val, sizeof(val));
1525
1526 /* Do a raw write */
1527 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1528
1529 /* We should read back the new values, and defaults for the rest */
1530 for (i = 0; i < config.max_register + 1; i++) {
1531 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1532
1533 switch (i) {
1534 case 2:
1535 case 3:
1536 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1537 KUNIT_EXPECT_EQ(test, rval,
866f7021 1538 be16_to_cpu((__force __be16)val[i % 2]));
155a6bd6
MB
1539 } else {
1540 KUNIT_EXPECT_EQ(test, rval,
866f7021 1541 le16_to_cpu((__force __le16)val[i % 2]));
155a6bd6
MB
1542 }
1543 break;
1544 default:
1545 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1546 break;
1547 }
1548 }
1549
1550 /* The values should appear in the "hardware" */
1551 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
155a6bd6
MB
1552}
1553
d958d978
BW
1554static bool reg_zero(struct device *dev, unsigned int reg)
1555{
1556 return reg == 0;
1557}
1558
1559static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
1560{
1561 return reg == 0;
1562}
1563
1564static void raw_noinc_write(struct kunit *test)
1565{
d958d978
BW
1566 struct regmap *map;
1567 struct regmap_config config;
1568 struct regmap_ram_data *data;
7011b51f
BW
1569 unsigned int val;
1570 u16 val_test, val_last;
d958d978
BW
1571 u16 val_array[BLOCK_TEST_SIZE];
1572
1573 config = raw_regmap_config;
1574 config.volatile_reg = reg_zero;
1575 config.writeable_noinc_reg = reg_zero;
1576 config.readable_noinc_reg = reg_zero;
1577
48bccea9 1578 map = gen_raw_regmap(test, &config, &data);
d958d978
BW
1579 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1580 if (IS_ERR(map))
1581 return;
1582
1583 data->noinc_reg = ram_reg_zero;
1584
1585 get_random_bytes(&val_array, sizeof(val_array));
1586
1587 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1588 val_test = be16_to_cpu(val_array[1]) + 100;
1589 val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
1590 } else {
1591 val_test = le16_to_cpu(val_array[1]) + 100;
1592 val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
1593 }
1594
1595 /* Put some data into the register following the noinc register */
1596 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
1597
1598 /* Write some data to the noinc register */
1599 KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
1600 sizeof(val_array)));
1601
1602 /* We should read back the last value written */
1603 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
1604 KUNIT_ASSERT_EQ(test, val_last, val);
1605
1606 /* Make sure we didn't touch the register after the noinc register */
1607 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
1608 KUNIT_ASSERT_EQ(test, val_test, val);
d958d978
BW
1609}
1610
155a6bd6
MB
1611static void raw_sync(struct kunit *test)
1612{
155a6bd6
MB
1613 struct regmap *map;
1614 struct regmap_config config;
1615 struct regmap_ram_data *data;
2f0dbb24 1616 u16 val[3];
155a6bd6
MB
1617 u16 *hw_buf;
1618 unsigned int rval;
1619 int i;
1620
1621 config = raw_regmap_config;
1622
48bccea9 1623 map = gen_raw_regmap(test, &config, &data);
155a6bd6
MB
1624 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1625 if (IS_ERR(map))
1626 return;
1627
1628 hw_buf = (u16 *)data->vals;
1629
2f0dbb24 1630 get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
155a6bd6
MB
1631
1632 /* Do a regular write and a raw write in cache only mode */
1633 regcache_cache_only(map, true);
2f0dbb24
MB
1634 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
1635 sizeof(u16) * 2));
1636 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
155a6bd6
MB
1637
1638 /* We should read back the new values, and defaults for the rest */
1639 for (i = 0; i < config.max_register + 1; i++) {
1640 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1641
1642 switch (i) {
1643 case 2:
1644 case 3:
155a6bd6
MB
1645 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1646 KUNIT_EXPECT_EQ(test, rval,
866f7021 1647 be16_to_cpu((__force __be16)val[i - 2]));
155a6bd6
MB
1648 } else {
1649 KUNIT_EXPECT_EQ(test, rval,
866f7021 1650 le16_to_cpu((__force __le16)val[i - 2]));
155a6bd6
MB
1651 }
1652 break;
2f0dbb24
MB
1653 case 4:
1654 KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
1655 break;
155a6bd6
MB
1656 default:
1657 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1658 break;
1659 }
1660 }
2f0dbb24
MB
1661
1662 /*
1663 * The value written via _write() was translated by the core,
1664 * translate the original copy for comparison purposes.
1665 */
1666 if (config.val_format_endian == REGMAP_ENDIAN_BIG)
1667 val[2] = cpu_to_be16(val[2]);
1668 else
1669 val[2] = cpu_to_le16(val[2]);
7b7982f1 1670
155a6bd6 1671 /* The values should not appear in the "hardware" */
2f0dbb24 1672 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
155a6bd6
MB
1673
1674 for (i = 0; i < config.max_register + 1; i++)
1675 data->written[i] = false;
1676
1677 /* Do the sync */
1678 regcache_cache_only(map, false);
1679 regcache_mark_dirty(map);
1680 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1681
1682 /* The values should now appear in the "hardware" */
2f0dbb24 1683 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
155a6bd6
MB
1684}
1685
bb92804b
MB
1686static void raw_ranges(struct kunit *test)
1687{
bb92804b
MB
1688 struct regmap *map;
1689 struct regmap_config config;
1690 struct regmap_ram_data *data;
1691 unsigned int val;
1692 int i;
1693
1694 config = raw_regmap_config;
1695 config.volatile_reg = test_range_all_volatile;
1696 config.ranges = &test_range;
1697 config.num_ranges = 1;
1698 config.max_register = test_range.range_max;
1699
48bccea9 1700 map = gen_raw_regmap(test, &config, &data);
bb92804b
MB
1701 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1702 if (IS_ERR(map))
1703 return;
1704
1705 /* Reset the page to a non-zero value to trigger a change */
1706 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
1707 test_range.range_max));
1708
1709 /* Check we set the page and use the window for writes */
1710 data->written[test_range.selector_reg] = false;
1711 data->written[test_range.window_start] = false;
1712 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1713 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1714 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
1715
1716 data->written[test_range.selector_reg] = false;
1717 data->written[test_range.window_start] = false;
1718 KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
1719 test_range.range_min +
1720 test_range.window_len,
1721 0));
1722 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1723 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
1724
1725 /* Same for reads */
1726 data->written[test_range.selector_reg] = false;
1727 data->read[test_range.window_start] = false;
1728 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
1729 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1730 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
1731
1732 data->written[test_range.selector_reg] = false;
1733 data->read[test_range.window_start] = false;
1734 KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
1735 test_range.range_min +
1736 test_range.window_len,
1737 &val));
1738 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1739 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
1740
1741 /* No physical access triggered in the virtual range */
1742 for (i = test_range.range_min; i < test_range.range_max; i++) {
1743 KUNIT_EXPECT_FALSE(test, data->read[i]);
1744 KUNIT_EXPECT_FALSE(test, data->written[i]);
1745 }
bb92804b
MB
1746}
1747
2238959b
MB
1748static struct kunit_case regmap_test_cases[] = {
1749 KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
1750 KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
1751 KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
18003306 1752 KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
a07bff40 1753 KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
2238959b
MB
1754 KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
1755 KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
1756 KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
1757 KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
1758 KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
1759 KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
1760 KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
7903d15f
RF
1761 KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
1762 KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
1763 KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
1764 KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
357a1ebd 1765 KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
2238959b
MB
1766 KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
1767 KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
468d277e 1768 KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
7dd52d30
RF
1769 KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
1770 KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
1771 KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
d881ee5a 1772 KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
ac4394bf 1773 KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
155a6bd6
MB
1774
1775 KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
1776 KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
1777 KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
1778 KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
d958d978 1779 KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
155a6bd6 1780 KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
bb92804b 1781 KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
2238959b
MB
1782 {}
1783};
1784
7b7982f1
RF
1785static int regmap_test_init(struct kunit *test)
1786{
1787 struct regmap_test_priv *priv;
1788 struct device *dev;
1789
1790 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
1791 if (!priv)
1792 return -ENOMEM;
1793
1794 test->priv = priv;
1795
1796 dev = kunit_device_register(test, "regmap_test");
1797 priv->dev = get_device(dev);
1798 if (!priv->dev)
1799 return -ENODEV;
1800
1801 dev_set_drvdata(dev, test);
1802
1803 return 0;
1804}
1805
1806static void regmap_test_exit(struct kunit *test)
1807{
1808 struct regmap_test_priv *priv = test->priv;
1809
1810 /* Destroy the dummy struct device */
1811 if (priv && priv->dev)
1812 put_device(priv->dev);
1813}
1814
2238959b
MB
1815static struct kunit_suite regmap_test_suite = {
1816 .name = "regmap",
7b7982f1
RF
1817 .init = regmap_test_init,
1818 .exit = regmap_test_exit,
2238959b
MB
1819 .test_cases = regmap_test_cases,
1820};
1821kunit_test_suite(regmap_test_suite);
1822
1823MODULE_LICENSE("GPL v2");