/*
 * Register cache access API - LZO caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/lzo.h>

#include "internal.h"

static int regcache_lzo_exit(struct regmap *map);

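/*
 * Per-block compression context: scratch memory for the compressor, the
 * source/destination buffers and their lengths, the size of the block
 * once decompressed, and a pointer to the bitmap (shared by all blocks)
 * that marks which registers still need to be synced.
 */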
struct regcache_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};

#define LZO_BLOCK_NUM 8
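/* Number of blocks the raw register cache is split into */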
static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}

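/* Allocate the scratch memory needed by the LZO1X-1 compressor */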
static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}

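/*
 * Compress src (src_len bytes) into dst; on success dst_len is updated
 * to the compressed size.  Returns -EINVAL if the compressor fails or
 * the output does not fit in the destination buffer.
 */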
static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}

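/*
 * Decompress src into dst, expecting exactly dst_len bytes of output.
 * Returns -EINVAL on any LZO error or size mismatch.
 */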
static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}

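/* Allocate a worst-case sized buffer and compress the block into it */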
static int regcache_lzo_compress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

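/* Allocate a decompressed_size buffer and decompress the block into it */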
static int regcache_lzo_decompress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

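/* Map a register number to the index of the block that caches it */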
static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return (reg * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}

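/* Map a register number to its word offset within the decompressed block */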
static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return reg % (DIV_ROUND_UP(map->cache_size_raw,
				   regcache_lzo_block_count(map)) /
		      map->cache_word_size);
}

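/* Uncompressed size in bytes of a single cache block */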
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}

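/*
 * Allocate the block descriptors and the shared sync bitmap, then split
 * the raw register defaults into LZO_BLOCK_NUM blocks and compress each
 * of them.
 */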
static int regcache_lzo_init(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	int ret, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;

	blkcount = regcache_lzo_block_count(map);
	map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
			     GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;
	lzo_blocks = map->cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware.  Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = map->num_reg_defaults_raw;
	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
			   GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}
	bitmap_zero(sync_bmp, bmp_size);

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; i++) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			kfree(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = regcache_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = regcache_lzo_get_blksize(map);
	p = map->reg_defaults_raw;
	end = map->reg_defaults_raw + map->cache_size_raw;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; i++, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = regcache_lzo_compress_cache_block(map,
						       lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	return 0;
err:
	regcache_lzo_exit(map);
	return ret;
}

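/*
 * Free the shared sync bitmap, the per-block buffers and the block
 * descriptors.  Safe to call on a partially initialized cache.
 */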
static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks.  Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		kfree(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	map->cache = NULL;
	return 0;
}

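/*
 * Decompress the block holding @reg into a temporary buffer, fetch the
 * cached value, then restore the compressed block.
 */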
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	/* size of the compressed block */
	blksize = regcache_lzo_get_blksize(map);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(lzo_block->dst, blkpos,
					  map->cache_word_size);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}

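/*
 * Decompress the block holding @reg, update the value, recompress the
 * block and mark @reg as dirty in the sync bitmap.
 */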
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	/* size of the compressed block */
	blksize = regcache_lzo_get_blksize(map);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(lzo_block->dst, blkpos, value,
			     map->cache_word_size)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}

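/*
 * Write back every register marked in the sync bitmap within [min, max],
 * skipping registers that still hold their hardware default.
 */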
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = map->cache;
	i = min;
	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = 1;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return 0;
}

struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};