regmap: Track if the register cache is dirty and suppress unneeded syncs
drivers/base/regmap/regcache-lzo.c (linux-2.6-block.git)
/*
 * Register cache access API - LZO caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/lzo.h>

#include "internal.h"

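/*
 * Per-block compression context: wmem is the scratch memory needed by
 * lzo1x_1_compress(), src/src_len and dst/dst_len describe the input and
 * output buffers of the next (de)compression, decompressed_size records
 * how large the block is once expanded, and sync_bmp points to a bitmap
 * of dirty registers that is shared between all blocks.
 */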
struct regcache_lzo_ctx {
        void *wmem;
        void *dst;
        const void *src;
        size_t src_len;
        size_t dst_len;
        size_t decompressed_size;
        unsigned long *sync_bmp;
        int sync_bmp_nbits;
};

#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(void)
{
        return LZO_BLOCK_NUM;
}

static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
        lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!lzo_ctx->wmem)
                return -ENOMEM;
        return 0;
}

static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
        size_t compress_size;
        int ret;

        ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
                               lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
        if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
                return -EINVAL;
        lzo_ctx->dst_len = compress_size;
        return 0;
}

static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
        size_t dst_len;
        int ret;

        dst_len = lzo_ctx->dst_len;
        ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
                                    lzo_ctx->dst, &dst_len);
        if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
                return -EINVAL;
        return 0;
}

static int regcache_lzo_compress_cache_block(struct regmap *map,
                                             struct regcache_lzo_ctx *lzo_ctx)
{
        int ret;

        lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
        lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
        if (!lzo_ctx->dst) {
                lzo_ctx->dst_len = 0;
                return -ENOMEM;
        }

        ret = regcache_lzo_compress(lzo_ctx);
        if (ret < 0)
                return ret;
        return 0;
}

static int regcache_lzo_decompress_cache_block(struct regmap *map,
                                               struct regcache_lzo_ctx *lzo_ctx)
{
        int ret;

        lzo_ctx->dst_len = lzo_ctx->decompressed_size;
        lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
        if (!lzo_ctx->dst) {
                lzo_ctx->dst_len = 0;
                return -ENOMEM;
        }

        ret = regcache_lzo_decompress(lzo_ctx);
        if (ret < 0)
                return ret;
        return 0;
}

static inline int regcache_lzo_get_blkindex(struct regmap *map,
                                            unsigned int reg)
{
        return (reg * map->cache_word_size) /
                DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
}

static inline int regcache_lzo_get_blkpos(struct regmap *map,
                                          unsigned int reg)
{
        return reg % (DIV_ROUND_UP(map->cache_size_raw,
                                   regcache_lzo_block_count()) /
                      map->cache_word_size);
}

static inline int regcache_lzo_get_blksize(struct regmap *map)
{
        return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
}
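/*
 * Worked example of the block arithmetic above (sizes are illustrative
 * only, not taken from any particular driver): with cache_size_raw = 256
 * bytes, cache_word_size = 2 and LZO_BLOCK_NUM = 8, every uncompressed
 * block covers DIV_ROUND_UP(256, 8) = 32 bytes, i.e. 16 registers.
 * Register 20 therefore lives in block (20 * 2) / 32 = 1 at position
 * 20 % 16 = 4 within that block.
 */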

static int regcache_lzo_init(struct regmap *map)
{
        struct regcache_lzo_ctx **lzo_blocks;
        size_t bmp_size;
        int ret, i, blksize, blkcount;
        const char *p, *end;
        unsigned long *sync_bmp;

        ret = 0;

        blkcount = regcache_lzo_block_count();
        map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
                             GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;
        lzo_blocks = map->cache;

        /*
         * allocate a bitmap to be used when syncing the cache with
         * the hardware. Each time a register is modified, the corresponding
         * bit is set in the bitmap, so we know that we have to sync
         * that register.
         */
        bmp_size = map->num_reg_defaults_raw;
        sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
                           GFP_KERNEL);
        if (!sync_bmp) {
                ret = -ENOMEM;
                goto err;
        }
        bitmap_zero(sync_bmp, bmp_size);

        /* allocate the lzo blocks and initialize them */
        for (i = 0; i < blkcount; i++) {
                lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
                                        GFP_KERNEL);
                if (!lzo_blocks[i]) {
                        kfree(sync_bmp);
                        ret = -ENOMEM;
                        goto err;
                }
                lzo_blocks[i]->sync_bmp = sync_bmp;
                lzo_blocks[i]->sync_bmp_nbits = bmp_size;
                /* alloc the working space for the compressed block */
                ret = regcache_lzo_prepare(lzo_blocks[i]);
                if (ret < 0)
                        goto err;
        }

        blksize = regcache_lzo_get_blksize(map);
        p = map->reg_defaults_raw;
        end = map->reg_defaults_raw + map->cache_size_raw;
        /* compress the register map and fill the lzo blocks */
        for (i = 0; i < blkcount; i++, p += blksize) {
                lzo_blocks[i]->src = p;
                if (p + blksize > end)
                        lzo_blocks[i]->src_len = end - p;
                else
                        lzo_blocks[i]->src_len = blksize;
                ret = regcache_lzo_compress_cache_block(map,
                                                        lzo_blocks[i]);
                if (ret < 0)
                        goto err;
                lzo_blocks[i]->decompressed_size =
                        lzo_blocks[i]->src_len;
        }

        return 0;
err:
        regcache_exit(map);
        return ret;
}

static int regcache_lzo_exit(struct regmap *map)
{
        struct regcache_lzo_ctx **lzo_blocks;
        int i, blkcount;

        lzo_blocks = map->cache;
        if (!lzo_blocks)
                return 0;

        blkcount = regcache_lzo_block_count();
        /*
         * the pointer to the bitmap used for syncing the cache
         * is shared amongst all lzo_blocks. Ensure it is freed
         * only once.
         */
        if (lzo_blocks[0])
                kfree(lzo_blocks[0]->sync_bmp);
        for (i = 0; i < blkcount; i++) {
                if (lzo_blocks[i]) {
                        kfree(lzo_blocks[i]->wmem);
                        kfree(lzo_blocks[i]->dst);
                }
                /* each lzo_block is a pointer returned by kmalloc or NULL */
                kfree(lzo_blocks[i]);
        }
        kfree(lzo_blocks);
        map->cache = NULL;
        return 0;
}

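/*
 * Reading a single register decompresses the whole block that holds it
 * into a temporary buffer, picks the value out of that buffer and frees
 * it again; the compressed copy kept in dst is saved and restored around
 * the operation, so the cache itself is left untouched.
 */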
static int regcache_lzo_read(struct regmap *map,
                             unsigned int reg, unsigned int *value)
{
        struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
        int ret, blkindex, blkpos;
        size_t blksize, tmp_dst_len;
        void *tmp_dst;

        /* index of the compressed lzo block */
        blkindex = regcache_lzo_get_blkindex(map, reg);
        /* register index within the decompressed block */
        blkpos = regcache_lzo_get_blkpos(map, reg);
        /* size of the compressed block */
        blksize = regcache_lzo_get_blksize(map);
        lzo_blocks = map->cache;
        lzo_block = lzo_blocks[blkindex];

        /* save the pointer and length of the compressed block */
        tmp_dst = lzo_block->dst;
        tmp_dst_len = lzo_block->dst_len;

        /* prepare the source to be the compressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* decompress the block */
        ret = regcache_lzo_decompress_cache_block(map, lzo_block);
        if (ret >= 0)
                /* fetch the value from the cache */
                *value = regcache_get_val(lzo_block->dst, blkpos,
                                          map->cache_word_size);

        kfree(lzo_block->dst);
        /* restore the pointer and length of the compressed block */
        lzo_block->dst = tmp_dst;
        lzo_block->dst_len = tmp_dst_len;

        return ret;
}

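/*
 * Updating a register is a decompress / modify / recompress cycle on the
 * block that holds it.  The old compressed buffer is only freed once the
 * new one has been produced, so a failed update leaves the cached block
 * unchanged; on success the register is marked in sync_bmp so a later
 * sync knows it must be written out.
 */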
static int regcache_lzo_write(struct regmap *map,
                              unsigned int reg, unsigned int value)
{
        struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
        int ret, blkindex, blkpos;
        size_t blksize, tmp_dst_len;
        void *tmp_dst;

        /* index of the compressed lzo block */
        blkindex = regcache_lzo_get_blkindex(map, reg);
        /* register index within the decompressed block */
        blkpos = regcache_lzo_get_blkpos(map, reg);
        /* size of the compressed block */
        blksize = regcache_lzo_get_blksize(map);
        lzo_blocks = map->cache;
        lzo_block = lzo_blocks[blkindex];

        /* save the pointer and length of the compressed block */
        tmp_dst = lzo_block->dst;
        tmp_dst_len = lzo_block->dst_len;

        /* prepare the source to be the compressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* decompress the block */
        ret = regcache_lzo_decompress_cache_block(map, lzo_block);
        if (ret < 0) {
                kfree(lzo_block->dst);
                goto out;
        }

        /* write the new value to the cache */
        if (regcache_set_val(lzo_block->dst, blkpos, value,
                             map->cache_word_size)) {
                kfree(lzo_block->dst);
                goto out;
        }

        /* prepare the source to be the decompressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* compress the block */
        ret = regcache_lzo_compress_cache_block(map, lzo_block);
        if (ret < 0) {
                kfree(lzo_block->dst);
                kfree(lzo_block->src);
                goto out;
        }

        /* set the bit so we know we have to sync this register */
        set_bit(reg, lzo_block->sync_bmp);
        kfree(tmp_dst);
        kfree(lzo_block->src);
        return 0;
out:
        lzo_block->dst = tmp_dst;
        lzo_block->dst_len = tmp_dst_len;
        return ret;
}

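/*
 * Sync walks the shared dirty bitmap, reads each flagged register back
 * from the cache and writes it out to the device.  cache_bypass is set
 * around the write so _regmap_write() goes straight to the hardware
 * instead of updating the cache again.
 */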
static int regcache_lzo_sync(struct regmap *map)
{
        struct regcache_lzo_ctx **lzo_blocks;
        unsigned int val;
        int i;
        int ret;

        lzo_blocks = map->cache;
        for_each_set_bit(i, lzo_blocks[0]->sync_bmp,
                         lzo_blocks[0]->sync_bmp_nbits) {
                ret = regcache_read(map, i, &val);
                if (ret)
                        return ret;
                map->cache_bypass = 1;
                ret = _regmap_write(map, i, val);
                map->cache_bypass = 0;
                if (ret)
                        return ret;
                dev_dbg(map->dev, "Synced register %#x, value %#x\n",
                        i, val);
        }

        return 0;
}

struct regcache_ops regcache_lzo_ops = {
        .type = REGCACHE_LZO,
        .name = "lzo",
        .init = regcache_lzo_init,
        .exit = regcache_lzo_exit,
        .read = regcache_lzo_read,
        .write = regcache_lzo_write,
        .sync = regcache_lzo_sync
};
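/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * driver opts in to this cache implementation through its regmap_config,
 * e.g.
 *
 *      static const struct regmap_config foo_regmap_config = {
 *              .reg_bits   = 8,
 *              .val_bits   = 16,
 *              .cache_type = REGCACHE_LZO,
 *      };
 *
 * The regcache core matches that cache type against the .type field of
 * regcache_lzo_ops above and then routes cached reads, writes and syncs
 * through the callbacks in this table.
 */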