/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
        &regcache_indexed_ops,
        &regcache_rbtree_ops,
        &regcache_lzo_ops,
};

static int regcache_hw_init(struct regmap *map)
{
        int i, j;
        int ret;
        int count;
        unsigned int val;
        void *tmp_buf;

        if (!map->num_reg_defaults_raw)
                return -EINVAL;

        if (!map->reg_defaults_raw) {
                dev_warn(map->dev, "No cache defaults, reading back from HW\n");
                tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
                if (!tmp_buf)
                        return -ENOMEM;
                ret = regmap_bulk_read(map, 0, tmp_buf,
                                       map->num_reg_defaults_raw);
                if (ret < 0) {
                        kfree(tmp_buf);
                        return ret;
                }
                map->reg_defaults_raw = tmp_buf;
                map->cache_free = 1;
        }

        /* calculate the size of reg_defaults */
        for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
                val = regcache_get_val(map->reg_defaults_raw,
                                       i, map->cache_word_size);
                if (!val)
                        continue;
                count++;
        }

        map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
                                    GFP_KERNEL);
        if (!map->reg_defaults) {
                ret = -ENOMEM;
                goto err_free;
        }

        /* fill the reg_defaults */
        map->num_reg_defaults = count;
        for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
                val = regcache_get_val(map->reg_defaults_raw,
                                       i, map->cache_word_size);
                if (!val)
                        continue;
                map->reg_defaults[j].reg = i;
                map->reg_defaults[j].def = val;
                j++;
        }

        return 0;

err_free:
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        return ret;
}

int regcache_init(struct regmap *map)
{
        int ret;
        int i;
        void *tmp_buf;

        if (map->cache_type == REGCACHE_NONE) {
                map->cache_bypass = true;
                return 0;
        }

        for (i = 0; i < ARRAY_SIZE(cache_types); i++)
                if (cache_types[i]->type == map->cache_type)
                        break;

        if (i == ARRAY_SIZE(cache_types)) {
                dev_err(map->dev, "Could not match cache type: %d\n",
                        map->cache_type);
                return -EINVAL;
        }

        map->cache = NULL;
        map->cache_ops = cache_types[i];

        if (!map->cache_ops->read ||
            !map->cache_ops->write ||
            !map->cache_ops->name)
                return -EINVAL;

        /* We still need to ensure that the reg_defaults
         * won't vanish from under us. We'll need to make
         * a copy of it.
         */
        if (map->reg_defaults) {
                if (!map->num_reg_defaults)
                        return -EINVAL;
                tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
                                  sizeof(struct reg_default), GFP_KERNEL);
                if (!tmp_buf)
                        return -ENOMEM;
                map->reg_defaults = tmp_buf;
        } else if (map->num_reg_defaults_raw) {
                /* Some devices such as PMICs don't have cache defaults,
                 * we cope with this by reading back the HW registers and
                 * crafting the cache defaults by hand.
                 */
                ret = regcache_hw_init(map);
                if (ret < 0)
                        return ret;
        }

        if (!map->max_register)
                map->max_register = map->num_reg_defaults_raw;

        if (map->cache_ops->init) {
                dev_dbg(map->dev, "Initializing %s cache\n",
                        map->cache_ops->name);
                return map->cache_ops->init(map);
        }
        return 0;
}

void regcache_exit(struct regmap *map)
{
        if (map->cache_type == REGCACHE_NONE)
                return;

        BUG_ON(!map->cache_ops);

        kfree(map->reg_defaults);
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        if (map->cache_ops->exit) {
                dev_dbg(map->dev, "Destroying %s cache\n",
                        map->cache_ops->name);
                map->cache_ops->exit(map);
        }
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: Pointer used to return the value read from the cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
                  unsigned int reg, unsigned int *value)
{
        if (map->cache_type == REGCACHE_NONE)
                return -ENOSYS;

        BUG_ON(!map->cache_ops);

        if (!regmap_readable(map, reg))
                return -EIO;

        if (!regmap_volatile(map, reg))
                return map->cache_ops->read(map, reg, value);

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(regcache_read);
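
/*
 * Illustrative sketch (an assumption about the calling code, not lifted
 * from the regmap core): a read path built on regcache_read() would
 * normally try the cache first and only touch the hardware when the
 * cache cannot answer, e.g. for volatile registers or REGCACHE_NONE.
 *
 *      ret = regcache_read(map, reg, &val);
 *      if (ret == 0)
 *              return 0;
 *      ... otherwise fall back to a physical bus read of "reg" ...
 */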

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
                   unsigned int reg, unsigned int value)
{
        if (map->cache_type == REGCACHE_NONE)
                return 0;

        BUG_ON(!map->cache_ops);

        if (!regmap_writeable(map, reg))
                return -EIO;

        if (!regmap_volatile(map, reg))
                return map->cache_ops->write(map, reg, value);

        return 0;
}
EXPORT_SYMBOL_GPL(regcache_write);
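
/*
 * Illustrative sketch (an assumption about the calling code): the write
 * path is the mirror image, updating the cache first and skipping the
 * bus access entirely while the map is in cache only mode.
 *
 *      if (!map->cache_bypass) {
 *              ret = regcache_write(map, reg, val);
 *              if (ret < 0)
 *                      return ret;
 *              if (map->cache_only)
 *                      return 0;
 *      }
 *      ... otherwise perform the physical bus write of "reg" ...
 */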

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
        int ret = 0;
        unsigned int val;
        unsigned int i;
        const char *name;
        unsigned int bypass;

        BUG_ON(!map->cache_ops);

        mutex_lock(&map->lock);
        /* Remember the initial bypass state */
        bypass = map->cache_bypass;
        dev_dbg(map->dev, "Syncing %s cache\n",
                map->cache_ops->name);
        name = map->cache_ops->name;
        trace_regcache_sync(map->dev, name, "start");
        if (!map->cache_dirty)
                goto out;
        if (map->cache_ops->sync) {
                ret = map->cache_ops->sync(map);
        } else {
                for (i = 0; i < map->num_reg_defaults; i++) {
                        ret = regcache_read(map, i, &val);
                        if (ret < 0)
                                goto out;
                        map->cache_bypass = 1;
                        ret = _regmap_write(map, i, val);
                        map->cache_bypass = 0;
                        if (ret < 0)
                                goto out;
                        dev_dbg(map->dev, "Synced register %#x, value %#x\n",
                                map->reg_defaults[i].reg,
                                map->reg_defaults[i].def);
                }
        }
out:
        trace_regcache_sync(map->dev, name, "stop");
        /* Restore the bypass state */
        map->cache_bypass = bypass;
        mutex_unlock(&map->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
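
/*
 * Illustrative usage sketch (hypothetical driver code, not taken from a
 * real driver): a resume handler would typically re-enable hardware
 * access and then replay the cached register values.
 *
 *      ... power the device back up ...
 *      regcache_cache_only(map, false);
 *      ret = regcache_sync(map);
 *      if (ret < 0)
 *              dev_err(dev, "Failed to restore register cache: %d\n", ret);
 */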

/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag to enter (true) or leave (false) cache only mode
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes. This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
        mutex_lock(&map->lock);
        WARN_ON(map->cache_bypass && enable);
        map->cache_only = enable;
        mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
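
/*
 * Illustrative usage sketch (hypothetical driver code): the suspend side
 * of the resume sequence sketched above, redirecting writes into the
 * cache before power is removed.
 *
 *      regcache_cache_only(map, true);
 *      regcache_mark_dirty(map);
 *      ... power the device down ...
 */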

/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend. If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
        mutex_lock(&map->lock);
        map->cache_dirty = true;
        mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
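
/*
 * Illustrative note with a small sketch (assumed driver usage): since
 * regcache_sync() returns early when the cache is clean, a driver that
 * knows the device has lost its register state should mark the cache
 * dirty before asking for a sync.
 *
 *      regcache_mark_dirty(map);
 *      ... device loses state (e.g. its supply is switched off) ...
 *      regcache_sync(map);
 */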

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag to enter (true) or leave (false) cache bypass mode
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache directly. This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
        mutex_lock(&map->lock);
        WARN_ON(map->cache_only && enable);
        map->cache_bypass = enable;
        mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
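
/*
 * Illustrative usage sketch (hypothetical driver code): bypass mode lets
 * a driver push writes straight to the hardware without disturbing the
 * cached state, for instance while streaming coefficients to an assumed
 * DATA_PORT register.
 *
 *      regcache_cache_bypass(map, true);
 *      for (i = 0; i < len; i++)
 *              regmap_write(map, DATA_PORT, buf[i]);
 *      regcache_cache_bypass(map, false);
 */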

bool regcache_set_val(void *base, unsigned int idx,
                      unsigned int val, unsigned int word_size)
{
        switch (word_size) {
        case 1: {
                u8 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        case 2: {
                u16 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        default:
                BUG();
        }
        /* the cached value was updated */
        return false;
}

unsigned int regcache_get_val(const void *base, unsigned int idx,
                              unsigned int word_size)
{
        if (!base)
                return -EINVAL;

        switch (word_size) {
        case 1: {
                const u8 *cache = base;
                return cache[idx];
        }
        case 2: {
                const u16 *cache = base;
                return cache[idx];
        }
        default:
                BUG();
        }
        /* unreachable */
        return -1;
}
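
/*
 * Illustrative sketch (an assumption, not one of the backends in this
 * tree): a minimal flat-array cache could implement its regcache_ops
 * read/write callbacks directly on top of regcache_get_val() and
 * regcache_set_val(), with map->cache pointing at a buffer of
 * map->num_reg_defaults_raw words of map->cache_word_size bytes each.
 *
 *      static int flat_cache_read(struct regmap *map, unsigned int reg,
 *                                 unsigned int *value)
 *      {
 *              *value = regcache_get_val(map->cache, reg,
 *                                        map->cache_word_size);
 *              return 0;
 *      }
 *
 *      static int flat_cache_write(struct regmap *map, unsigned int reg,
 *                                  unsigned int value)
 *      {
 *              regcache_set_val(map->cache, reg, value,
 *                               map->cache_word_size);
 *              return 0;
 *      }
 */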

static int regcache_default_cmp(const void *a, const void *b)
{
        const struct reg_default *_a = a;
        const struct reg_default *_b = b;

        return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
        struct reg_default key;
        struct reg_default *r;

        key.reg = reg;
        key.def = 0;

        r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
                    sizeof(struct reg_default), regcache_default_cmp);

        if (r)
                return r - map->reg_defaults;
        else
                return -ENOENT;
}
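
/*
 * Illustrative sketch (an assumption about how a cache backend might use
 * the lookup): during a sync a backend can skip registers that still hold
 * their hardware default.
 *
 *      idx = regcache_lookup_reg(map, reg);
 *      if (idx >= 0 && map->reg_defaults[idx].def == val)
 *              continue;
 *      ... otherwise write "val" out to the hardware ...
 */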

int regcache_insert_reg(struct regmap *map, unsigned int reg,
                        unsigned int val)
{
        void *tmp;

        tmp = krealloc(map->reg_defaults,
                       (map->num_reg_defaults + 1) * sizeof(struct reg_default),
                       GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
        map->reg_defaults = tmp;
        map->num_reg_defaults++;
        map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
        map->reg_defaults[map->num_reg_defaults - 1].def = val;
        sort(map->reg_defaults, map->num_reg_defaults,
             sizeof(struct reg_default), regcache_default_cmp, NULL);
        return 0;
}