Commit | Line | Data |
---|---|---|
9fabe24e DP |
1 | /* |
2 | * Register cache access API | |
3 | * | |
4 | * Copyright 2011 Wolfson Microelectronics plc | |
5 | * | |
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/slab.h> | |
1b6bc32f | 14 | #include <linux/export.h> |
51990e82 | 15 | #include <linux/device.h> |
9fabe24e | 16 | #include <trace/events/regmap.h> |
f094fea6 | 17 | #include <linux/bsearch.h> |
c08604b8 | 18 | #include <linux/sort.h> |
9fabe24e DP |
19 | |
20 | #include "internal.h" | |
21 | ||
/*
 * Table of all compiled-in cache implementations; regcache_init()
 * selects the entry whose ->type matches map->cache_type.
 */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};
27 | ||
28 | static int regcache_hw_init(struct regmap *map) | |
29 | { | |
30 | int i, j; | |
31 | int ret; | |
32 | int count; | |
33 | unsigned int val; | |
34 | void *tmp_buf; | |
35 | ||
36 | if (!map->num_reg_defaults_raw) | |
37 | return -EINVAL; | |
38 | ||
39 | if (!map->reg_defaults_raw) { | |
df00c79f | 40 | u32 cache_bypass = map->cache_bypass; |
9fabe24e | 41 | dev_warn(map->dev, "No cache defaults, reading back from HW\n"); |
df00c79f LD |
42 | |
43 | /* Bypass the cache access till data read from HW*/ | |
44 | map->cache_bypass = 1; | |
9fabe24e DP |
45 | tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); |
46 | if (!tmp_buf) | |
47 | return -EINVAL; | |
eb4cb76f MB |
48 | ret = regmap_raw_read(map, 0, tmp_buf, |
49 | map->num_reg_defaults_raw); | |
df00c79f | 50 | map->cache_bypass = cache_bypass; |
9fabe24e DP |
51 | if (ret < 0) { |
52 | kfree(tmp_buf); | |
53 | return ret; | |
54 | } | |
55 | map->reg_defaults_raw = tmp_buf; | |
56 | map->cache_free = 1; | |
57 | } | |
58 | ||
59 | /* calculate the size of reg_defaults */ | |
60 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { | |
879082c9 | 61 | val = regcache_get_val(map, map->reg_defaults_raw, i); |
f01ee60f | 62 | if (regmap_volatile(map, i * map->reg_stride)) |
9fabe24e DP |
63 | continue; |
64 | count++; | |
65 | } | |
66 | ||
67 | map->reg_defaults = kmalloc(count * sizeof(struct reg_default), | |
68 | GFP_KERNEL); | |
021cd616 LPC |
69 | if (!map->reg_defaults) { |
70 | ret = -ENOMEM; | |
71 | goto err_free; | |
72 | } | |
9fabe24e DP |
73 | |
74 | /* fill the reg_defaults */ | |
75 | map->num_reg_defaults = count; | |
76 | for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { | |
879082c9 | 77 | val = regcache_get_val(map, map->reg_defaults_raw, i); |
f01ee60f | 78 | if (regmap_volatile(map, i * map->reg_stride)) |
9fabe24e | 79 | continue; |
f01ee60f | 80 | map->reg_defaults[j].reg = i * map->reg_stride; |
9fabe24e DP |
81 | map->reg_defaults[j].def = val; |
82 | j++; | |
83 | } | |
84 | ||
85 | return 0; | |
021cd616 LPC |
86 | |
87 | err_free: | |
88 | if (map->cache_free) | |
89 | kfree(map->reg_defaults_raw); | |
90 | ||
91 | return ret; | |
9fabe24e DP |
92 | } |
93 | ||
/*
 * regcache_init - set up register caching for a map
 * @map: regmap to initialise
 * @config: configuration supplied by the driver
 *
 * Selects the cache implementation matching map->cache_type, copies or
 * constructs the register default table, and runs the implementation's
 * init hook.  Returns 0 on success or a negative errno.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* Every declared default must sit on a reg_stride boundary */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	/* No cache requested: permanently bypass and we're done */
	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	/* Find the cache implementation matching the requested type */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	/* Bytes needed to store one register value in the cache */
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory for any implementation */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us. We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	/* Fall back to the raw-default count as the register range */
	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	/* Undo the default-table allocations made above */
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
174 | ||
175 | void regcache_exit(struct regmap *map) | |
176 | { | |
177 | if (map->cache_type == REGCACHE_NONE) | |
178 | return; | |
179 | ||
180 | BUG_ON(!map->cache_ops); | |
181 | ||
182 | kfree(map->reg_defaults); | |
183 | if (map->cache_free) | |
184 | kfree(map->reg_defaults_raw); | |
185 | ||
186 | if (map->cache_ops->exit) { | |
187 | dev_dbg(map->dev, "Destroying %s cache\n", | |
188 | map->cache_ops->name); | |
189 | map->cache_ops->exit(map); | |
190 | } | |
191 | } | |
192 | ||
193 | /** | |
194 | * regcache_read: Fetch the value of a given register from the cache. | |
195 | * | |
196 | * @map: map to configure. | |
197 | * @reg: The register index. | |
198 | * @value: The value to be returned. | |
199 | * | |
200 | * Return a negative value on failure, 0 on success. | |
201 | */ | |
202 | int regcache_read(struct regmap *map, | |
203 | unsigned int reg, unsigned int *value) | |
204 | { | |
bc7ee556 MB |
205 | int ret; |
206 | ||
9fabe24e DP |
207 | if (map->cache_type == REGCACHE_NONE) |
208 | return -ENOSYS; | |
209 | ||
210 | BUG_ON(!map->cache_ops); | |
211 | ||
bc7ee556 MB |
212 | if (!regmap_volatile(map, reg)) { |
213 | ret = map->cache_ops->read(map, reg, value); | |
214 | ||
215 | if (ret == 0) | |
216 | trace_regmap_reg_read_cache(map->dev, reg, *value); | |
217 | ||
218 | return ret; | |
219 | } | |
9fabe24e DP |
220 | |
221 | return -EINVAL; | |
222 | } | |
9fabe24e DP |
223 | |
224 | /** | |
225 | * regcache_write: Set the value of a given register in the cache. | |
226 | * | |
227 | * @map: map to configure. | |
228 | * @reg: The register index. | |
229 | * @value: The new register value. | |
230 | * | |
231 | * Return a negative value on failure, 0 on success. | |
232 | */ | |
233 | int regcache_write(struct regmap *map, | |
234 | unsigned int reg, unsigned int value) | |
235 | { | |
236 | if (map->cache_type == REGCACHE_NONE) | |
237 | return 0; | |
238 | ||
239 | BUG_ON(!map->cache_ops); | |
240 | ||
9fabe24e DP |
241 | if (!regmap_volatile(map, reg)) |
242 | return map->cache_ops->write(map, reg, value); | |
243 | ||
244 | return 0; | |
245 | } | |
9fabe24e | 246 | |
d856fce4 MH |
247 | static int regcache_default_sync(struct regmap *map, unsigned int min, |
248 | unsigned int max) | |
249 | { | |
250 | unsigned int reg; | |
251 | ||
252 | for (reg = min; reg <= max; reg++) { | |
253 | unsigned int val; | |
254 | int ret; | |
255 | ||
256 | if (regmap_volatile(map, reg)) | |
257 | continue; | |
258 | ||
259 | ret = regcache_read(map, reg, &val); | |
260 | if (ret) | |
261 | return ret; | |
262 | ||
263 | /* Is this the hardware default? If so skip. */ | |
264 | ret = regcache_lookup_reg(map, reg); | |
265 | if (ret >= 0 && val == map->reg_defaults[ret].def) | |
266 | continue; | |
267 | ||
268 | map->cache_bypass = 1; | |
269 | ret = _regmap_write(map, reg, val); | |
270 | map->cache_bypass = 0; | |
271 | if (ret) | |
272 | return ret; | |
273 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); | |
274 | } | |
275 | ||
276 | return 0; | |
277 | } | |
278 | ||
9fabe24e DP |
279 | /** |
280 | * regcache_sync: Sync the register cache with the hardware. | |
281 | * | |
282 | * @map: map to configure. | |
283 | * | |
284 | * Any registers that should not be synced should be marked as | |
285 | * volatile. In general drivers can choose not to use the provided | |
286 | * syncing functionality if they so require. | |
287 | * | |
288 | * Return a negative value on failure, 0 on success. | |
289 | */ | |
290 | int regcache_sync(struct regmap *map) | |
291 | { | |
954757d7 | 292 | int ret = 0; |
954757d7 | 293 | unsigned int i; |
59360089 | 294 | const char *name; |
beb1a10f | 295 | unsigned int bypass; |
59360089 | 296 | |
d856fce4 | 297 | BUG_ON(!map->cache_ops); |
9fabe24e | 298 | |
81485f52 | 299 | map->lock(map->lock_arg); |
beb1a10f DP |
300 | /* Remember the initial bypass state */ |
301 | bypass = map->cache_bypass; | |
954757d7 DP |
302 | dev_dbg(map->dev, "Syncing %s cache\n", |
303 | map->cache_ops->name); | |
304 | name = map->cache_ops->name; | |
305 | trace_regcache_sync(map->dev, name, "start"); | |
22f0d90a | 306 | |
8ae0d7e8 MB |
307 | if (!map->cache_dirty) |
308 | goto out; | |
d9db7627 | 309 | |
affbe886 MB |
310 | map->async = true; |
311 | ||
22f0d90a | 312 | /* Apply any patch first */ |
8a892d69 | 313 | map->cache_bypass = 1; |
22f0d90a | 314 | for (i = 0; i < map->patch_regs; i++) { |
f01ee60f SW |
315 | if (map->patch[i].reg % map->reg_stride) { |
316 | ret = -EINVAL; | |
317 | goto out; | |
318 | } | |
22f0d90a MB |
319 | ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); |
320 | if (ret != 0) { | |
321 | dev_err(map->dev, "Failed to write %x = %x: %d\n", | |
322 | map->patch[i].reg, map->patch[i].def, ret); | |
323 | goto out; | |
324 | } | |
325 | } | |
8a892d69 | 326 | map->cache_bypass = 0; |
22f0d90a | 327 | |
d856fce4 MH |
328 | if (map->cache_ops->sync) |
329 | ret = map->cache_ops->sync(map, 0, map->max_register); | |
330 | else | |
331 | ret = regcache_default_sync(map, 0, map->max_register); | |
954757d7 | 332 | |
6ff73738 MB |
333 | if (ret == 0) |
334 | map->cache_dirty = false; | |
954757d7 | 335 | |
954757d7 | 336 | out: |
beb1a10f | 337 | /* Restore the bypass state */ |
affbe886 | 338 | map->async = false; |
beb1a10f | 339 | map->cache_bypass = bypass; |
81485f52 | 340 | map->unlock(map->lock_arg); |
954757d7 | 341 | |
affbe886 MB |
342 | regmap_async_complete(map); |
343 | ||
344 | trace_regcache_sync(map->dev, name, "stop"); | |
345 | ||
954757d7 | 346 | return ret; |
9fabe24e DP |
347 | } |
348 | EXPORT_SYMBOL_GPL(regcache_sync); | |
349 | ||
4d4cfd16 MB |
350 | /** |
351 | * regcache_sync_region: Sync part of the register cache with the hardware. | |
352 | * | |
353 | * @map: map to sync. | |
354 | * @min: first register to sync | |
355 | * @max: last register to sync | |
356 | * | |
357 | * Write all non-default register values in the specified region to | |
358 | * the hardware. | |
359 | * | |
360 | * Return a negative value on failure, 0 on success. | |
361 | */ | |
362 | int regcache_sync_region(struct regmap *map, unsigned int min, | |
363 | unsigned int max) | |
364 | { | |
365 | int ret = 0; | |
366 | const char *name; | |
367 | unsigned int bypass; | |
368 | ||
d856fce4 | 369 | BUG_ON(!map->cache_ops); |
4d4cfd16 | 370 | |
81485f52 | 371 | map->lock(map->lock_arg); |
4d4cfd16 MB |
372 | |
373 | /* Remember the initial bypass state */ | |
374 | bypass = map->cache_bypass; | |
375 | ||
376 | name = map->cache_ops->name; | |
377 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); | |
378 | ||
379 | trace_regcache_sync(map->dev, name, "start region"); | |
380 | ||
381 | if (!map->cache_dirty) | |
382 | goto out; | |
383 | ||
affbe886 MB |
384 | map->async = true; |
385 | ||
d856fce4 MH |
386 | if (map->cache_ops->sync) |
387 | ret = map->cache_ops->sync(map, min, max); | |
388 | else | |
389 | ret = regcache_default_sync(map, min, max); | |
4d4cfd16 MB |
390 | |
391 | out: | |
4d4cfd16 MB |
392 | /* Restore the bypass state */ |
393 | map->cache_bypass = bypass; | |
affbe886 | 394 | map->async = false; |
81485f52 | 395 | map->unlock(map->lock_arg); |
4d4cfd16 | 396 | |
affbe886 MB |
397 | regmap_async_complete(map); |
398 | ||
399 | trace_regcache_sync(map->dev, name, "stop region"); | |
400 | ||
4d4cfd16 MB |
401 | return ret; |
402 | } | |
e466de05 | 403 | EXPORT_SYMBOL_GPL(regcache_sync_region); |
4d4cfd16 | 404 | |
697e85bc MB |
405 | /** |
406 | * regcache_drop_region: Discard part of the register cache | |
407 | * | |
408 | * @map: map to operate on | |
409 | * @min: first register to discard | |
410 | * @max: last register to discard | |
411 | * | |
412 | * Discard part of the register cache. | |
413 | * | |
414 | * Return a negative value on failure, 0 on success. | |
415 | */ | |
416 | int regcache_drop_region(struct regmap *map, unsigned int min, | |
417 | unsigned int max) | |
418 | { | |
697e85bc MB |
419 | int ret = 0; |
420 | ||
3f4ff561 | 421 | if (!map->cache_ops || !map->cache_ops->drop) |
697e85bc MB |
422 | return -EINVAL; |
423 | ||
81485f52 | 424 | map->lock(map->lock_arg); |
697e85bc MB |
425 | |
426 | trace_regcache_drop_region(map->dev, min, max); | |
427 | ||
3f4ff561 | 428 | ret = map->cache_ops->drop(map, min, max); |
697e85bc | 429 | |
81485f52 | 430 | map->unlock(map->lock_arg); |
697e85bc MB |
431 | |
432 | return ret; | |
433 | } | |
434 | EXPORT_SYMBOL_GPL(regcache_drop_region); | |
435 | ||
92afb286 MB |
436 | /** |
437 | * regcache_cache_only: Put a register map into cache only mode | |
438 | * | |
439 | * @map: map to configure | |
440 | * @cache_only: flag if changes should be written to the hardware | |
441 | * | |
442 | * When a register map is marked as cache only writes to the register | |
443 | * map API will only update the register cache, they will not cause | |
444 | * any hardware changes. This is useful for allowing portions of | |
445 | * drivers to act as though the device were functioning as normal when | |
446 | * it is disabled for power saving reasons. | |
447 | */ | |
448 | void regcache_cache_only(struct regmap *map, bool enable) | |
449 | { | |
81485f52 | 450 | map->lock(map->lock_arg); |
ac77a765 | 451 | WARN_ON(map->cache_bypass && enable); |
92afb286 | 452 | map->cache_only = enable; |
5d5b7d4f | 453 | trace_regmap_cache_only(map->dev, enable); |
81485f52 | 454 | map->unlock(map->lock_arg); |
92afb286 MB |
455 | } |
456 | EXPORT_SYMBOL_GPL(regcache_cache_only); | |
457 | ||
8ae0d7e8 MB |
458 | /** |
459 | * regcache_mark_dirty: Mark the register cache as dirty | |
460 | * | |
461 | * @map: map to mark | |
462 | * | |
463 | * Mark the register cache as dirty, for example due to the device | |
464 | * having been powered down for suspend. If the cache is not marked | |
465 | * as dirty then the cache sync will be suppressed. | |
466 | */ | |
467 | void regcache_mark_dirty(struct regmap *map) | |
468 | { | |
81485f52 | 469 | map->lock(map->lock_arg); |
8ae0d7e8 | 470 | map->cache_dirty = true; |
81485f52 | 471 | map->unlock(map->lock_arg); |
8ae0d7e8 MB |
472 | } |
473 | EXPORT_SYMBOL_GPL(regcache_mark_dirty); | |
474 | ||
6eb0f5e0 DP |
475 | /** |
476 | * regcache_cache_bypass: Put a register map into cache bypass mode | |
477 | * | |
478 | * @map: map to configure | |
0eef6b04 | 479 | * @cache_bypass: flag if changes should not be written to the hardware |
6eb0f5e0 DP |
480 | * |
481 | * When a register map is marked with the cache bypass option, writes | |
482 | * to the register map API will only update the hardware and not the | |
483 | * the cache directly. This is useful when syncing the cache back to | |
484 | * the hardware. | |
485 | */ | |
486 | void regcache_cache_bypass(struct regmap *map, bool enable) | |
487 | { | |
81485f52 | 488 | map->lock(map->lock_arg); |
ac77a765 | 489 | WARN_ON(map->cache_only && enable); |
6eb0f5e0 | 490 | map->cache_bypass = enable; |
5d5b7d4f | 491 | trace_regmap_cache_bypass(map->dev, enable); |
81485f52 | 492 | map->unlock(map->lock_arg); |
6eb0f5e0 DP |
493 | } |
494 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); | |
495 | ||
879082c9 MB |
496 | bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, |
497 | unsigned int val) | |
9fabe24e | 498 | { |
325acab4 MB |
499 | if (regcache_get_val(map, base, idx) == val) |
500 | return true; | |
501 | ||
eb4cb76f MB |
502 | /* Use device native format if possible */ |
503 | if (map->format.format_val) { | |
504 | map->format.format_val(base + (map->cache_word_size * idx), | |
505 | val, 0); | |
506 | return false; | |
507 | } | |
508 | ||
879082c9 | 509 | switch (map->cache_word_size) { |
9fabe24e DP |
510 | case 1: { |
511 | u8 *cache = base; | |
9fabe24e DP |
512 | cache[idx] = val; |
513 | break; | |
514 | } | |
515 | case 2: { | |
516 | u16 *cache = base; | |
9fabe24e DP |
517 | cache[idx] = val; |
518 | break; | |
519 | } | |
7d5e525b MB |
520 | case 4: { |
521 | u32 *cache = base; | |
7d5e525b MB |
522 | cache[idx] = val; |
523 | break; | |
524 | } | |
9fabe24e DP |
525 | default: |
526 | BUG(); | |
527 | } | |
9fabe24e DP |
528 | return false; |
529 | } | |
530 | ||
879082c9 MB |
531 | unsigned int regcache_get_val(struct regmap *map, const void *base, |
532 | unsigned int idx) | |
9fabe24e DP |
533 | { |
534 | if (!base) | |
535 | return -EINVAL; | |
536 | ||
eb4cb76f MB |
537 | /* Use device native format if possible */ |
538 | if (map->format.parse_val) | |
8817796b MB |
539 | return map->format.parse_val(regcache_get_val_addr(map, base, |
540 | idx)); | |
eb4cb76f | 541 | |
879082c9 | 542 | switch (map->cache_word_size) { |
9fabe24e DP |
543 | case 1: { |
544 | const u8 *cache = base; | |
545 | return cache[idx]; | |
546 | } | |
547 | case 2: { | |
548 | const u16 *cache = base; | |
549 | return cache[idx]; | |
550 | } | |
7d5e525b MB |
551 | case 4: { |
552 | const u32 *cache = base; | |
553 | return cache[idx]; | |
554 | } | |
9fabe24e DP |
555 | default: |
556 | BUG(); | |
557 | } | |
558 | /* unreachable */ | |
559 | return -1; | |
560 | } | |
561 | ||
f094fea6 | 562 | static int regcache_default_cmp(const void *a, const void *b) |
c08604b8 DP |
563 | { |
564 | const struct reg_default *_a = a; | |
565 | const struct reg_default *_b = b; | |
566 | ||
567 | return _a->reg - _b->reg; | |
568 | } | |
569 | ||
f094fea6 MB |
570 | int regcache_lookup_reg(struct regmap *map, unsigned int reg) |
571 | { | |
572 | struct reg_default key; | |
573 | struct reg_default *r; | |
574 | ||
575 | key.reg = reg; | |
576 | key.def = 0; | |
577 | ||
578 | r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, | |
579 | sizeof(struct reg_default), regcache_default_cmp); | |
580 | ||
581 | if (r) | |
582 | return r - map->reg_defaults; | |
583 | else | |
6e6ace00 | 584 | return -ENOENT; |
f094fea6 | 585 | } |
f8bd822c | 586 | |
3f4ff561 LPC |
587 | static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) |
588 | { | |
589 | if (!cache_present) | |
590 | return true; | |
591 | ||
592 | return test_bit(idx, cache_present); | |
593 | } | |
594 | ||
cfdeb8c3 | 595 | static int regcache_sync_block_single(struct regmap *map, void *block, |
3f4ff561 | 596 | unsigned long *cache_present, |
cfdeb8c3 MB |
597 | unsigned int block_base, |
598 | unsigned int start, unsigned int end) | |
599 | { | |
600 | unsigned int i, regtmp, val; | |
601 | int ret; | |
602 | ||
603 | for (i = start; i < end; i++) { | |
604 | regtmp = block_base + (i * map->reg_stride); | |
605 | ||
3f4ff561 | 606 | if (!regcache_reg_present(cache_present, i)) |
cfdeb8c3 MB |
607 | continue; |
608 | ||
609 | val = regcache_get_val(map, block, i); | |
610 | ||
611 | /* Is this the hardware default? If so skip. */ | |
612 | ret = regcache_lookup_reg(map, regtmp); | |
613 | if (ret >= 0 && val == map->reg_defaults[ret].def) | |
614 | continue; | |
615 | ||
616 | map->cache_bypass = 1; | |
617 | ||
618 | ret = _regmap_write(map, regtmp, val); | |
619 | ||
620 | map->cache_bypass = 0; | |
621 | if (ret != 0) | |
622 | return ret; | |
623 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | |
624 | regtmp, val); | |
625 | } | |
626 | ||
627 | return 0; | |
628 | } | |
629 | ||
75a5f89f MB |
630 | static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, |
631 | unsigned int base, unsigned int cur) | |
632 | { | |
633 | size_t val_bytes = map->format.val_bytes; | |
634 | int ret, count; | |
635 | ||
636 | if (*data == NULL) | |
637 | return 0; | |
638 | ||
639 | count = cur - base; | |
640 | ||
9659293c | 641 | dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", |
75a5f89f MB |
642 | count * val_bytes, count, base, cur - 1); |
643 | ||
644 | map->cache_bypass = 1; | |
645 | ||
0a819809 | 646 | ret = _regmap_raw_write(map, base, *data, count * val_bytes); |
75a5f89f MB |
647 | |
648 | map->cache_bypass = 0; | |
649 | ||
650 | *data = NULL; | |
651 | ||
652 | return ret; | |
653 | } | |
654 | ||
/*
 * regcache_sync_block_raw - sync a cache block using coalesced raw writes
 * @map: regmap to sync
 * @block: raw cache data for the block
 * @cache_present: optional presence bitmap (NULL = all present)
 * @block_base: register address of the block's first entry
 * @start: first index within the block to sync
 * @end: one past the last index to sync
 *
 * Walks the block accumulating runs of adjacent registers that need
 * writing; each run is flushed as a single raw write when it is broken
 * by an absent register or one still at its hardware default.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;	/* start of the pending run, NULL = none */
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* An absent register ends the current run */
		if (!regcache_reg_present(cache_present, i)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default? If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def) {
			/* A default-valued register also ends the run */
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Start a new run at this register if none is pending */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run is still pending at the end of the block;
	 * regtmp holds the last address visited, so the run's exclusive
	 * end is one stride beyond it.
	 */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}
/*
 * regcache_sync_block - sync one cache block to the hardware
 * @map: regmap to sync
 * @block: raw cache data for the block
 * @cache_present: optional presence bitmap (NULL = all present)
 * @block_base: register address of the block's first entry
 * @start: first index within the block to sync
 * @end: one past the last index to sync
 *
 * Dispatches to the raw (coalescing) path when the bus supports raw
 * writes, otherwise falls back to per-register writes.
 */
int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (!regmap_can_raw_write(map))
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);

	return regcache_sync_block_raw(map, block, cache_present,
				       block_base, start, end);
}