Commit | Line | Data |
---|---|---|
28644c80 DP |
1 | /* |
2 | * Register cache access API - rbtree caching support | |
3 | * | |
4 | * Copyright 2011 Wolfson Microelectronics plc | |
5 | * | |
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/slab.h> | |
14 | #include <linux/rbtree.h> | |
15 | ||
16 | #include "internal.h" | |
17 | ||
18 | static int regcache_rbtree_write(struct regmap *map, unsigned int reg, | |
19 | unsigned int value); | |
20 | ||
21 | struct regcache_rbtree_node { | |
22 | /* the actual rbtree node holding this block */ | |
23 | struct rb_node node; | |
24 | /* base register handled by this block */ | |
25 | unsigned int base_reg; | |
26 | /* number of bytes needed to represent the register index */ | |
27 | unsigned int word_size; | |
28 | /* block of adjacent registers */ | |
29 | void *block; | |
30 | /* number of registers available in the block */ | |
31 | unsigned int blklen; | |
32 | } __attribute__ ((packed)); | |
33 | ||
/* Per-regmap context for the rbtree cache backend. */
struct regcache_rbtree_ctx {
	/* root of the tree of register blocks */
	struct rb_root root;
	/* most recently accessed block: a one-entry lookup cache */
	struct regcache_rbtree_node *cached_rbnode;
};
38 | ||
39 | static inline void regcache_rbtree_get_base_top_reg( | |
40 | struct regcache_rbtree_node *rbnode, | |
41 | unsigned int *base, unsigned int *top) | |
42 | { | |
43 | *base = rbnode->base_reg; | |
44 | *top = rbnode->base_reg + rbnode->blklen - 1; | |
45 | } | |
46 | ||
47 | static unsigned int regcache_rbtree_get_register( | |
48 | struct regcache_rbtree_node *rbnode, unsigned int idx) | |
49 | { | |
50 | unsigned int val; | |
51 | ||
52 | switch (rbnode->word_size) { | |
53 | case 1: { | |
54 | u8 *p = rbnode->block; | |
55 | val = p[idx]; | |
56 | return val; | |
57 | } | |
58 | case 2: { | |
59 | u16 *p = rbnode->block; | |
60 | val = p[idx]; | |
61 | return val; | |
62 | } | |
63 | default: | |
64 | BUG(); | |
65 | break; | |
66 | } | |
67 | return -1; | |
68 | } | |
69 | ||
70 | static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode, | |
71 | unsigned int idx, unsigned int val) | |
72 | { | |
73 | switch (rbnode->word_size) { | |
74 | case 1: { | |
75 | u8 *p = rbnode->block; | |
76 | p[idx] = val; | |
77 | break; | |
78 | } | |
79 | case 2: { | |
80 | u16 *p = rbnode->block; | |
81 | p[idx] = val; | |
82 | break; | |
83 | } | |
84 | default: | |
85 | BUG(); | |
86 | break; | |
87 | } | |
88 | } | |
89 | ||
90 | static struct regcache_rbtree_node *regcache_rbtree_lookup( | |
91 | struct rb_root *root, unsigned int reg) | |
92 | { | |
93 | struct rb_node *node; | |
94 | struct regcache_rbtree_node *rbnode; | |
95 | unsigned int base_reg, top_reg; | |
96 | ||
97 | node = root->rb_node; | |
98 | while (node) { | |
99 | rbnode = container_of(node, struct regcache_rbtree_node, node); | |
100 | regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); | |
101 | if (reg >= base_reg && reg <= top_reg) | |
102 | return rbnode; | |
103 | else if (reg > top_reg) | |
104 | node = node->rb_right; | |
105 | else if (reg < base_reg) | |
106 | node = node->rb_left; | |
107 | } | |
108 | ||
109 | return NULL; | |
110 | } | |
111 | ||
112 | static int regcache_rbtree_insert(struct rb_root *root, | |
113 | struct regcache_rbtree_node *rbnode) | |
114 | { | |
115 | struct rb_node **new, *parent; | |
116 | struct regcache_rbtree_node *rbnode_tmp; | |
117 | unsigned int base_reg_tmp, top_reg_tmp; | |
118 | unsigned int base_reg; | |
119 | ||
120 | parent = NULL; | |
121 | new = &root->rb_node; | |
122 | while (*new) { | |
123 | rbnode_tmp = container_of(*new, struct regcache_rbtree_node, | |
124 | node); | |
125 | /* base and top registers of the current rbnode */ | |
126 | regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp, | |
127 | &top_reg_tmp); | |
128 | /* base register of the rbnode to be added */ | |
129 | base_reg = rbnode->base_reg; | |
130 | parent = *new; | |
131 | /* if this register has already been inserted, just return */ | |
132 | if (base_reg >= base_reg_tmp && | |
133 | base_reg <= top_reg_tmp) | |
134 | return 0; | |
135 | else if (base_reg > top_reg_tmp) | |
136 | new = &((*new)->rb_right); | |
137 | else if (base_reg < base_reg_tmp) | |
138 | new = &((*new)->rb_left); | |
139 | } | |
140 | ||
141 | /* insert the node into the rbtree */ | |
142 | rb_link_node(&rbnode->node, parent, new); | |
143 | rb_insert_color(&rbnode->node, root); | |
144 | ||
145 | return 1; | |
146 | } | |
147 | ||
148 | static int regcache_rbtree_init(struct regmap *map) | |
149 | { | |
150 | struct regcache_rbtree_ctx *rbtree_ctx; | |
151 | int i; | |
152 | int ret; | |
153 | ||
154 | map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); | |
155 | if (!map->cache) | |
156 | return -ENOMEM; | |
157 | ||
158 | rbtree_ctx = map->cache; | |
159 | rbtree_ctx->root = RB_ROOT; | |
160 | rbtree_ctx->cached_rbnode = NULL; | |
161 | ||
162 | for (i = 0; i < map->num_reg_defaults; i++) { | |
163 | ret = regcache_rbtree_write(map, | |
164 | map->reg_defaults[i].reg, | |
165 | map->reg_defaults[i].def); | |
166 | if (ret) | |
167 | goto err; | |
168 | } | |
169 | ||
170 | return 0; | |
171 | ||
172 | err: | |
173 | regcache_exit(map); | |
174 | return ret; | |
175 | } | |
176 | ||
177 | static int regcache_rbtree_exit(struct regmap *map) | |
178 | { | |
179 | struct rb_node *next; | |
180 | struct regcache_rbtree_ctx *rbtree_ctx; | |
181 | struct regcache_rbtree_node *rbtree_node; | |
182 | ||
183 | /* if we've already been called then just return */ | |
184 | rbtree_ctx = map->cache; | |
185 | if (!rbtree_ctx) | |
186 | return 0; | |
187 | ||
188 | /* free up the rbtree */ | |
189 | next = rb_first(&rbtree_ctx->root); | |
190 | while (next) { | |
191 | rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); | |
192 | next = rb_next(&rbtree_node->node); | |
193 | rb_erase(&rbtree_node->node, &rbtree_ctx->root); | |
194 | kfree(rbtree_node->block); | |
195 | kfree(rbtree_node); | |
196 | } | |
197 | ||
198 | /* release the resources */ | |
199 | kfree(map->cache); | |
200 | map->cache = NULL; | |
201 | ||
202 | return 0; | |
203 | } | |
204 | ||
205 | static int regcache_rbtree_read(struct regmap *map, | |
206 | unsigned int reg, unsigned int *value) | |
207 | { | |
208 | struct regcache_rbtree_ctx *rbtree_ctx; | |
209 | struct regcache_rbtree_node *rbnode; | |
210 | unsigned int base_reg, top_reg; | |
211 | unsigned int reg_tmp; | |
212 | ||
213 | rbtree_ctx = map->cache; | |
214 | /* look up the required register in the cached rbnode */ | |
215 | rbnode = rbtree_ctx->cached_rbnode; | |
216 | if (rbnode) { | |
217 | regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); | |
218 | if (reg >= base_reg && reg <= top_reg) { | |
219 | reg_tmp = reg - base_reg; | |
220 | *value = regcache_rbtree_get_register(rbnode, reg_tmp); | |
221 | return 0; | |
222 | } | |
223 | } | |
224 | /* if we can't locate it in the cached rbnode we'll have | |
225 | * to traverse the rbtree looking for it. | |
226 | */ | |
227 | rbnode = regcache_rbtree_lookup(&rbtree_ctx->root, reg); | |
228 | if (rbnode) { | |
229 | reg_tmp = reg - rbnode->base_reg; | |
230 | *value = regcache_rbtree_get_register(rbnode, reg_tmp); | |
231 | rbtree_ctx->cached_rbnode = rbnode; | |
232 | } else { | |
233 | /* uninitialized registers default to 0 */ | |
234 | *value = 0; | |
235 | } | |
236 | ||
237 | return 0; | |
238 | } | |
239 | ||
240 | ||
241 | static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode, | |
242 | unsigned int pos, unsigned int reg, | |
243 | unsigned int value) | |
244 | { | |
245 | u8 *blk; | |
246 | ||
247 | blk = krealloc(rbnode->block, | |
248 | (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL); | |
249 | if (!blk) | |
250 | return -ENOMEM; | |
251 | ||
252 | /* insert the register value in the correct place in the rbnode block */ | |
253 | memmove(blk + (pos + 1) * rbnode->word_size, | |
254 | blk + pos * rbnode->word_size, | |
255 | (rbnode->blklen - pos) * rbnode->word_size); | |
256 | ||
257 | /* update the rbnode block, its size and the base register */ | |
258 | rbnode->block = blk; | |
259 | rbnode->blklen++; | |
260 | if (!pos) | |
261 | rbnode->base_reg = reg; | |
262 | ||
263 | regcache_rbtree_set_register(rbnode, pos, value); | |
264 | return 0; | |
265 | } | |
266 | ||
/*
 * Cache a register value.
 *
 * Fast path: check the most recently used block.  Slow path: look the
 * register up in the tree.  If no block covers it, either extend an
 * adjacent block by one slot or allocate a fresh single-register block
 * and insert it into the tree.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int base_reg, top_reg;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = map->cache;
	/* look up the required register in the cached rbnode */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			reg_tmp = reg - base_reg;
			val = regcache_rbtree_get_register(rbnode, reg_tmp);
			/* skip the store if the cached value already matches */
			if (val == value)
				return 0;
			regcache_rbtree_set_register(rbnode, reg_tmp, value);
			return 0;
		}
	}
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(&rbtree_ctx->root, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		val = regcache_rbtree_get_register(rbnode, reg_tmp);
		if (val == value)
			return 0;
		regcache_rbtree_set_register(rbnode, reg_tmp, value);
		/* remember this block for the next fast-path lookup */
		rbtree_ctx->cached_rbnode = rbnode;
	} else {
		/* bail out early, no need to create the rbnode yet;
		 * uncached registers read back as 0, so caching a 0 write
		 * would change nothing observable */
		if (!value)
			return 0;
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
			/* NOTE(review): scans every register of every block;
			 * checking only each block's first/last register would
			 * suffice for adjacency -- candidate optimization */
			for (i = 0; i < rbnode_tmp->blklen; i++) {
				reg_tmp = rbnode_tmp->base_reg + i;
				/* NOTE(review): reg_tmp - reg is unsigned and is
				 * converted to int for abs(); adjacent registers
				 * yield +/-1 on two's-complement -- confirm */
				if (abs(reg_tmp - reg) != 1)
					continue;
				/* decide where in the block to place our register:
				 * after slot i if reg follows reg_tmp, else before */
				if (reg_tmp + 1 == reg)
					pos = i + 1;
				else
					pos = i;
				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
								      reg, value);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->word_size = map->cache_word_size;
		rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		regcache_rbtree_set_register(rbnode, 0, value);
		regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
354 | ||
355 | static int regcache_rbtree_sync(struct regmap *map) | |
356 | { | |
357 | struct regcache_rbtree_ctx *rbtree_ctx; | |
358 | struct rb_node *node; | |
359 | struct regcache_rbtree_node *rbnode; | |
360 | unsigned int regtmp; | |
361 | unsigned int val, def; | |
362 | int ret; | |
363 | int i; | |
364 | ||
365 | rbtree_ctx = map->cache; | |
366 | for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { | |
367 | rbnode = rb_entry(node, struct regcache_rbtree_node, node); | |
368 | for (i = 0; i < rbnode->blklen; i++) { | |
369 | regtmp = rbnode->base_reg + i; | |
370 | val = regcache_rbtree_get_register(rbnode, i); | |
371 | ret = regcache_lookup_reg(map, i); | |
372 | if (ret < 0) | |
373 | def = 0; | |
374 | else | |
375 | def = map->reg_defaults[ret].def; | |
376 | if (val == def) | |
377 | continue; | |
378 | map->cache_bypass = 1; | |
379 | ret = regmap_write(map, regtmp, val); | |
380 | map->cache_bypass = 0; | |
381 | if (ret) | |
382 | return ret; | |
383 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | |
384 | regtmp, val); | |
385 | } | |
386 | } | |
387 | ||
388 | return 0; | |
389 | } | |
390 | ||
/* rbtree cache backend operations, registered with the regcache core */
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync
};