Commit | Line | Data |
---|---|---|
9fabe24e DP |
1 | /* |
2 | * Register cache access API | |
3 | * | |
4 | * Copyright 2011 Wolfson Microelectronics plc | |
5 | * | |
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/slab.h> | |
1b6bc32f | 14 | #include <linux/export.h> |
51990e82 | 15 | #include <linux/device.h> |
9fabe24e | 16 | #include <trace/events/regmap.h> |
f094fea6 | 17 | #include <linux/bsearch.h> |
c08604b8 | 18 | #include <linux/sort.h> |
9fabe24e DP |
19 | |
20 | #include "internal.h" | |
21 | ||
/* Cache implementations, tried in order when matching map->cache_type */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};
27 | ||
28 | static int regcache_hw_init(struct regmap *map) | |
29 | { | |
30 | int i, j; | |
31 | int ret; | |
32 | int count; | |
33 | unsigned int val; | |
34 | void *tmp_buf; | |
35 | ||
36 | if (!map->num_reg_defaults_raw) | |
37 | return -EINVAL; | |
38 | ||
39 | if (!map->reg_defaults_raw) { | |
df00c79f | 40 | u32 cache_bypass = map->cache_bypass; |
9fabe24e | 41 | dev_warn(map->dev, "No cache defaults, reading back from HW\n"); |
df00c79f LD |
42 | |
43 | /* Bypass the cache access till data read from HW*/ | |
44 | map->cache_bypass = 1; | |
9fabe24e DP |
45 | tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); |
46 | if (!tmp_buf) | |
47 | return -EINVAL; | |
eb4cb76f MB |
48 | ret = regmap_raw_read(map, 0, tmp_buf, |
49 | map->num_reg_defaults_raw); | |
df00c79f | 50 | map->cache_bypass = cache_bypass; |
9fabe24e DP |
51 | if (ret < 0) { |
52 | kfree(tmp_buf); | |
53 | return ret; | |
54 | } | |
55 | map->reg_defaults_raw = tmp_buf; | |
56 | map->cache_free = 1; | |
57 | } | |
58 | ||
59 | /* calculate the size of reg_defaults */ | |
60 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { | |
879082c9 | 61 | val = regcache_get_val(map, map->reg_defaults_raw, i); |
f01ee60f | 62 | if (regmap_volatile(map, i * map->reg_stride)) |
9fabe24e DP |
63 | continue; |
64 | count++; | |
65 | } | |
66 | ||
67 | map->reg_defaults = kmalloc(count * sizeof(struct reg_default), | |
68 | GFP_KERNEL); | |
021cd616 LPC |
69 | if (!map->reg_defaults) { |
70 | ret = -ENOMEM; | |
71 | goto err_free; | |
72 | } | |
9fabe24e DP |
73 | |
74 | /* fill the reg_defaults */ | |
75 | map->num_reg_defaults = count; | |
76 | for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { | |
879082c9 | 77 | val = regcache_get_val(map, map->reg_defaults_raw, i); |
f01ee60f | 78 | if (regmap_volatile(map, i * map->reg_stride)) |
9fabe24e | 79 | continue; |
f01ee60f | 80 | map->reg_defaults[j].reg = i * map->reg_stride; |
9fabe24e DP |
81 | map->reg_defaults[j].def = val; |
82 | j++; | |
83 | } | |
84 | ||
85 | return 0; | |
021cd616 LPC |
86 | |
87 | err_free: | |
88 | if (map->cache_free) | |
89 | kfree(map->reg_defaults_raw); | |
90 | ||
91 | return ret; | |
9fabe24e DP |
92 | } |
93 | ||
/*
 * regcache_init: Set up the register cache for a regmap.
 *
 * Validates the configured defaults, selects the cache implementation
 * matching map->cache_type, copies or crafts the register defaults and
 * runs the implementation's init hook.
 *
 * Returns 0 on success or a negative errno.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* Every explicit default must sit on a reg_stride boundary */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	/* No cache requested: route all accesses straight to the HW */
	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	/* Find the cache implementation matching the requested type */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	/* Bytes needed to hold one register value in the raw cache */
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
	map->cache_present = NULL;
	map->cache_present_nbits = 0;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory for any implementation */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us. We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	/* Fall back to the default table size if no max was configured */
	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	/* Undo the defaults allocation(s) done above / in hw_init */
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
176 | ||
177 | void regcache_exit(struct regmap *map) | |
178 | { | |
179 | if (map->cache_type == REGCACHE_NONE) | |
180 | return; | |
181 | ||
182 | BUG_ON(!map->cache_ops); | |
183 | ||
78493f2d | 184 | kfree(map->cache_present); |
9fabe24e DP |
185 | kfree(map->reg_defaults); |
186 | if (map->cache_free) | |
187 | kfree(map->reg_defaults_raw); | |
188 | ||
189 | if (map->cache_ops->exit) { | |
190 | dev_dbg(map->dev, "Destroying %s cache\n", | |
191 | map->cache_ops->name); | |
192 | map->cache_ops->exit(map); | |
193 | } | |
194 | } | |
195 | ||
196 | /** | |
197 | * regcache_read: Fetch the value of a given register from the cache. | |
198 | * | |
199 | * @map: map to configure. | |
200 | * @reg: The register index. | |
201 | * @value: The value to be returned. | |
202 | * | |
203 | * Return a negative value on failure, 0 on success. | |
204 | */ | |
205 | int regcache_read(struct regmap *map, | |
206 | unsigned int reg, unsigned int *value) | |
207 | { | |
bc7ee556 MB |
208 | int ret; |
209 | ||
9fabe24e DP |
210 | if (map->cache_type == REGCACHE_NONE) |
211 | return -ENOSYS; | |
212 | ||
213 | BUG_ON(!map->cache_ops); | |
214 | ||
bc7ee556 MB |
215 | if (!regmap_volatile(map, reg)) { |
216 | ret = map->cache_ops->read(map, reg, value); | |
217 | ||
218 | if (ret == 0) | |
219 | trace_regmap_reg_read_cache(map->dev, reg, *value); | |
220 | ||
221 | return ret; | |
222 | } | |
9fabe24e DP |
223 | |
224 | return -EINVAL; | |
225 | } | |
9fabe24e DP |
226 | |
227 | /** | |
228 | * regcache_write: Set the value of a given register in the cache. | |
229 | * | |
230 | * @map: map to configure. | |
231 | * @reg: The register index. | |
232 | * @value: The new register value. | |
233 | * | |
234 | * Return a negative value on failure, 0 on success. | |
235 | */ | |
236 | int regcache_write(struct regmap *map, | |
237 | unsigned int reg, unsigned int value) | |
238 | { | |
239 | if (map->cache_type == REGCACHE_NONE) | |
240 | return 0; | |
241 | ||
242 | BUG_ON(!map->cache_ops); | |
243 | ||
244 | if (!regmap_writeable(map, reg)) | |
245 | return -EIO; | |
246 | ||
247 | if (!regmap_volatile(map, reg)) | |
248 | return map->cache_ops->write(map, reg, value); | |
249 | ||
250 | return 0; | |
251 | } | |
9fabe24e | 252 | |
d856fce4 MH |
253 | static int regcache_default_sync(struct regmap *map, unsigned int min, |
254 | unsigned int max) | |
255 | { | |
256 | unsigned int reg; | |
257 | ||
258 | for (reg = min; reg <= max; reg++) { | |
259 | unsigned int val; | |
260 | int ret; | |
261 | ||
262 | if (regmap_volatile(map, reg)) | |
263 | continue; | |
264 | ||
265 | ret = regcache_read(map, reg, &val); | |
266 | if (ret) | |
267 | return ret; | |
268 | ||
269 | /* Is this the hardware default? If so skip. */ | |
270 | ret = regcache_lookup_reg(map, reg); | |
271 | if (ret >= 0 && val == map->reg_defaults[ret].def) | |
272 | continue; | |
273 | ||
274 | map->cache_bypass = 1; | |
275 | ret = _regmap_write(map, reg, val); | |
276 | map->cache_bypass = 0; | |
277 | if (ret) | |
278 | return ret; | |
279 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); | |
280 | } | |
281 | ||
282 | return 0; | |
283 | } | |
284 | ||
/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	/* Nothing changed since the last sync: nothing to write back */
	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		if (map->patch[i].reg % map->reg_stride) {
			ret = -EINVAL;
			goto out;
		}
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	/* Prefer the implementation's sync, else walk every register */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state; this also cleans up after the error
	 * paths in the patch loop, which jump here with bypass still set */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
349 | ||
/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map->dev, name, "start region");

	/* Nothing changed since the last sync: nothing to write back */
	if (!map->cache_dirty)
		goto out;

	/* Prefer the implementation's sync, else walk every register.
	 * Note: unlike regcache_sync() no patch is applied here and
	 * cache_dirty is deliberately left set - only part of the map
	 * has been synced. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	trace_regcache_sync(map->dev, name, "stop region");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
4d4cfd16 | 398 | |
697e85bc MB |
399 | /** |
400 | * regcache_drop_region: Discard part of the register cache | |
401 | * | |
402 | * @map: map to operate on | |
403 | * @min: first register to discard | |
404 | * @max: last register to discard | |
405 | * | |
406 | * Discard part of the register cache. | |
407 | * | |
408 | * Return a negative value on failure, 0 on success. | |
409 | */ | |
410 | int regcache_drop_region(struct regmap *map, unsigned int min, | |
411 | unsigned int max) | |
412 | { | |
413 | unsigned int reg; | |
414 | int ret = 0; | |
415 | ||
416 | if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop)) | |
417 | return -EINVAL; | |
418 | ||
81485f52 | 419 | map->lock(map->lock_arg); |
697e85bc MB |
420 | |
421 | trace_regcache_drop_region(map->dev, min, max); | |
422 | ||
423 | if (map->cache_present) | |
424 | for (reg = min; reg < max + 1; reg++) | |
425 | clear_bit(reg, map->cache_present); | |
426 | ||
427 | if (map->cache_ops && map->cache_ops->drop) | |
428 | ret = map->cache_ops->drop(map, min, max); | |
429 | ||
81485f52 | 430 | map->unlock(map->lock_arg); |
697e85bc MB |
431 | |
432 | return ret; | |
433 | } | |
434 | EXPORT_SYMBOL_GPL(regcache_drop_region); | |
435 | ||
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes. This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* cache-only and cache-bypass are mutually exclusive */
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
457 | ||
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend. If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	/* Take the map lock so the flag change is ordered against any
	 * sync currently in progress */
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
474 | ||
/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * the cache directly. This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* cache-only and cache-bypass are mutually exclusive */
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
495 | ||
78493f2d MB |
496 | int regcache_set_reg_present(struct regmap *map, unsigned int reg) |
497 | { | |
498 | unsigned long *cache_present; | |
499 | unsigned int cache_present_size; | |
500 | unsigned int nregs; | |
501 | int i; | |
502 | ||
503 | nregs = reg + 1; | |
504 | cache_present_size = BITS_TO_LONGS(nregs); | |
505 | cache_present_size *= sizeof(long); | |
506 | ||
507 | if (!map->cache_present) { | |
508 | cache_present = kmalloc(cache_present_size, GFP_KERNEL); | |
509 | if (!cache_present) | |
510 | return -ENOMEM; | |
511 | bitmap_zero(cache_present, nregs); | |
512 | map->cache_present = cache_present; | |
513 | map->cache_present_nbits = nregs; | |
514 | } | |
515 | ||
516 | if (nregs > map->cache_present_nbits) { | |
517 | cache_present = krealloc(map->cache_present, | |
518 | cache_present_size, GFP_KERNEL); | |
519 | if (!cache_present) | |
520 | return -ENOMEM; | |
521 | for (i = 0; i < nregs; i++) | |
522 | if (i >= map->cache_present_nbits) | |
523 | clear_bit(i, cache_present); | |
524 | map->cache_present = cache_present; | |
525 | map->cache_present_nbits = nregs; | |
526 | } | |
527 | ||
528 | set_bit(reg, map->cache_present); | |
529 | return 0; | |
530 | } | |
531 | ||
/*
 * regcache_set_val: Store @val at index @idx of the raw cache block @base.
 *
 * Returns true if the cache already held @val (nothing written),
 * false if the cache was updated.
 */
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	/* Nothing to do if the cached value is already current */
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	/* Otherwise store as a host-endian fixed-width value */
	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	return false;
}
566 | ||
/*
 * regcache_get_val: Fetch the raw cached value at index @idx from @base.
 *
 * NOTE(review): a NULL @base yields -EINVAL cast to unsigned int, which
 * is indistinguishable from a legitimate value - callers are expected
 * to pass a valid cache block.
 */
unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	/* Otherwise read as a host-endian fixed-width value */
	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}
597 | ||
f094fea6 | 598 | static int regcache_default_cmp(const void *a, const void *b) |
c08604b8 DP |
599 | { |
600 | const struct reg_default *_a = a; | |
601 | const struct reg_default *_b = b; | |
602 | ||
603 | return _a->reg - _b->reg; | |
604 | } | |
605 | ||
f094fea6 MB |
606 | int regcache_lookup_reg(struct regmap *map, unsigned int reg) |
607 | { | |
608 | struct reg_default key; | |
609 | struct reg_default *r; | |
610 | ||
611 | key.reg = reg; | |
612 | key.def = 0; | |
613 | ||
614 | r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, | |
615 | sizeof(struct reg_default), regcache_default_cmp); | |
616 | ||
617 | if (r) | |
618 | return r - map->reg_defaults; | |
619 | else | |
6e6ace00 | 620 | return -ENOENT; |
f094fea6 | 621 | } |
f8bd822c | 622 | |
/*
 * regcache_sync_block_single: Sync a cache block one register at a
 * time; used when the bus cannot accept raw block writes.
 */
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* Skip registers never written into the cache */
		if (!regcache_reg_present(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default? If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def)
			continue;

		/* Write straight to HW without re-dirtying the cache */
		map->cache_bypass = 1;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = 0;
		if (ret != 0)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}
656 | ||
75a5f89f MB |
657 | static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, |
658 | unsigned int base, unsigned int cur) | |
659 | { | |
660 | size_t val_bytes = map->format.val_bytes; | |
661 | int ret, count; | |
662 | ||
663 | if (*data == NULL) | |
664 | return 0; | |
665 | ||
666 | count = cur - base; | |
667 | ||
9659293c | 668 | dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", |
75a5f89f MB |
669 | count * val_bytes, count, base, cur - 1); |
670 | ||
671 | map->cache_bypass = 1; | |
672 | ||
673 | ret = _regmap_raw_write(map, base, *data, count * val_bytes, | |
674 | false); | |
675 | ||
676 | map->cache_bypass = 0; | |
677 | ||
678 | *data = NULL; | |
679 | ||
680 | return ret; | |
681 | } | |
682 | ||
f52687af | 683 | static int regcache_sync_block_raw(struct regmap *map, void *block, |
cfdeb8c3 MB |
684 | unsigned int block_base, unsigned int start, |
685 | unsigned int end) | |
f8bd822c | 686 | { |
75a5f89f MB |
687 | unsigned int i, val; |
688 | unsigned int regtmp = 0; | |
689 | unsigned int base = 0; | |
690 | const void *data = NULL; | |
f8bd822c MB |
691 | int ret; |
692 | ||
693 | for (i = start; i < end; i++) { | |
694 | regtmp = block_base + (i * map->reg_stride); | |
695 | ||
75a5f89f MB |
696 | if (!regcache_reg_present(map, regtmp)) { |
697 | ret = regcache_sync_block_raw_flush(map, &data, | |
698 | base, regtmp); | |
699 | if (ret != 0) | |
700 | return ret; | |
f8bd822c | 701 | continue; |
75a5f89f | 702 | } |
f8bd822c MB |
703 | |
704 | val = regcache_get_val(map, block, i); | |
705 | ||
706 | /* Is this the hardware default? If so skip. */ | |
707 | ret = regcache_lookup_reg(map, regtmp); | |
75a5f89f MB |
708 | if (ret >= 0 && val == map->reg_defaults[ret].def) { |
709 | ret = regcache_sync_block_raw_flush(map, &data, | |
710 | base, regtmp); | |
711 | if (ret != 0) | |
712 | return ret; | |
f8bd822c | 713 | continue; |
75a5f89f | 714 | } |
f8bd822c | 715 | |
75a5f89f MB |
716 | if (!data) { |
717 | data = regcache_get_val_addr(map, block, i); | |
718 | base = regtmp; | |
719 | } | |
f8bd822c MB |
720 | } |
721 | ||
75a5f89f | 722 | return regcache_sync_block_raw_flush(map, &data, base, regtmp); |
f8bd822c | 723 | } |
/*
 * regcache_sync_block: Sync one cache block, choosing raw block writes
 * when the bus supports them and per-register writes otherwise.
 */
int regcache_sync_block(struct regmap *map, void *block,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (!regmap_can_raw_write(map))
		return regcache_sync_block_single(map, block, block_base,
						  start, end);

	return regcache_sync_block_raw(map, block, block_base, start, end);
}