/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));

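/* per-map cache context: the tree of register blocks plus the most
 * recently used block, kept as a fast path for repeated accesses */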
struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

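/* return the lowest and highest register address covered by this block */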
static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	regcache_set_val(map, rbnode->block, idx, val);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

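/* link rbnode into the rbtree, ordered by register range; returns 0 if a
 * block already covering its base register exists, 1 once inserted */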
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

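/* debugfs support: an "rbtree" file dumping each block's register range
 * plus a summary of node count, register count and memory used */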
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open = rbtree_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

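/* allocate the cache context and seed it with the register defaults */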
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

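/* tear down the cache, freeing every block and the context itself */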
static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

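/* read a value from the cache; -ENOENT means the register is not cached */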
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

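/* grow the block by one slot and place value at index pos, shifting the
 * existing entries up; inserting at the front also updates base_reg */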
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int pos, unsigned int reg,
					   unsigned int value)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * map->cache_word_size,
		blk + pos * map->cache_word_size,
		(rbnode->blklen - pos) * map->cache_word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

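/* update the cached value for reg, extending an adjacent block where
 * possible or creating a new single-register block otherwise */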
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = map->cache;
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);
			for (i = 0; i < rbnode_tmp->blklen; i++) {
				reg_tmp = rbnode_tmp->base_reg +
						(i * map->reg_stride);
				if (abs(reg_tmp - reg) != map->reg_stride)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + map->reg_stride == reg)
					pos = i + 1;
				else
					pos = i;
				ret = regcache_rbtree_insert_to_block(map,
								      rbnode_tmp,
								      pos, reg,
								      value);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		regcache_rbtree_set_register(map, rbnode, 0, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

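/* write back every cached value in [min, max] that differs from the
 * hardware default, temporarily bypassing the cache for the writes */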
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i, base, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		if (rbnode->base_reg > max)
			break;
		if (rbnode->base_reg + rbnode->blklen < min)
			continue;

		if (min > rbnode->base_reg)
			base = min - rbnode->base_reg;
		else
			base = 0;

		if (max < rbnode->base_reg + rbnode->blklen)
			end = max - rbnode->base_reg + 1;
		else
			end = rbnode->blklen;

		for (i = base; i < end; i++) {
			regtmp = rbnode->base_reg + (i * map->reg_stride);
			val = regcache_rbtree_get_register(map, rbnode, i);

			/* Is this the hardware default? If so skip. */
			ret = regcache_lookup_reg(map, regtmp);
			if (ret >= 0 && val == map->reg_defaults[ret].def)
				continue;

			map->cache_bypass = 1;
			ret = _regmap_write(map, regtmp, val);
			map->cache_bypass = 0;
			if (ret)
				return ret;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				regtmp, val);
		}
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync
};