/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	if (mode == HWLOCK_IRQSTATE)
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		ret = spin_trylock_irq(&hwlock->lock);
	else
		ret = spin_trylock(&hwlock->lock);

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		if (mode == HWLOCK_IRQSTATE)
			spin_unlock_irqrestore(&hwlock->lock, *flags);
		else if (mode == HWLOCK_IRQ)
			spin_unlock_irq(&hwlock->lock);
		else
			spin_unlock(&hwlock->lock);

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);

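/*
 * Usage sketch (illustrative only, not part of this file): callers normally
 * reach __hwspin_trylock() through the static inline wrappers declared in
 * <linux/hwspinlock.h> (hwspin_trylock(), hwspin_trylock_irq(),
 * hwspin_trylock_irqsave()). A minimal consumer, assuming a previously
 * requested "hwlock" and a hypothetical shared_counter it protects:
 *
 *	unsigned long flags;
 *
 *	if (!hwspin_trylock_irqsave(hwlock, &flags)) {
 *		shared_counter++;	// critical section: keep it short
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	} else {
 *		// -EBUSY: a remote core (or local context) holds the lock
 *	}
 */
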
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

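/*
 * Usage sketch (illustrative only): the timeout variant is typically used
 * through the hwspin_lock_timeout*() wrappers from <linux/hwspinlock.h>.
 * A hypothetical consumer willing to busy-wait for up to 100 msecs:
 *
 *	int ret;
 *
 *	ret = hwspin_lock_timeout(hwlock, 100);
 *	if (ret == -ETIMEDOUT) {
 *		// lock was still held by a remote core after 100 msecs
 *		return ret;
 *	}
 *
 *	// ... short, non-sleeping critical section ...
 *
 *	hwspin_unlock(hwlock);
 */
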
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
		spin_unlock_irqrestore(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		spin_unlock_irq(&hwlock->lock);
	else
		spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

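/*
 * Usage sketch (illustrative only): a DT consumer references the hwspinlock
 * controller via a "hwlocks" property, then resolves it to a global lock id
 * at probe time. The node names and lock index below are hypothetical:
 *
 *	hwlock_dev: hwspinlock@4a0f6000 {
 *		#hwlock-cells = <1>;
 *		...
 *	};
 *
 *	client_dev {
 *		hwlocks = <&hwlock_dev 2>;
 *	};
 *
 * and in the client driver's probe():
 *
 *	int id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
 *	if (id == -EPROBE_DEFER)
 *		return id;	// hwspinlock controller not registered yet
 *	hwlock = hwspin_lock_request_specific(id);
 */
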
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

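/*
 * Registration sketch (illustrative only): a platform driver providing a
 * bank of hw locks allocates a struct hwspinlock_device with room for the
 * trailing per-lock array, fills in its ops, and registers the bank. The
 * ops implementations and lock count below are hypothetical:
 *
 *	static const struct hwspinlock_ops my_hwspinlock_ops = {
 *		.trylock	= my_hwspinlock_trylock,
 *		.unlock		= my_hwspinlock_unlock,
 *		.relax		= my_hwspinlock_relax,	// optional
 *	};
 *
 *	bank = devm_kzalloc(&pdev->dev, sizeof(*bank) +
 *			num_locks * sizeof(struct hwspinlock), GFP_KERNEL);
 *	if (!bank)
 *		return -ENOMEM;
 *
 *	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *				   base_id, num_locks);
 *
 * and on remove, hwspin_lock_unregister(bank) undoes the registration.
 */
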
/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock instance to prepare
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);

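/*
 * Usage sketch (illustrative only): dynamic allocation, followed by sharing
 * the lock's id with a remote core. How the id is communicated is platform
 * specific; the send_id_to_remote() helper below is purely hypothetical:
 *
 *	struct hwspinlock *hwlock;
 *	int id;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	id = hwspin_lock_get_id(hwlock);
 *	send_id_to_remote(id);		// e.g. via shared memory or a mailbox
 *
 *	// ... use hwspin_lock_timeout()/hwspin_unlock() as needed ...
 *
 *	hwspin_lock_free(hwlock);	// when done with the lock
 */
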
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

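/*
 * Usage sketch (illustrative only): reserving a well-known lock id that both
 * sides agreed upon in advance (e.g. in board code or a DT binding). The id
 * value is hypothetical:
 *
 *	#define MY_PREAGREED_HWLOCK_ID	0
 *
 *	hwlock = hwspin_lock_request_specific(MY_PREAGREED_HWLOCK_ID);
 *	if (!hwlock)
 *		return -EBUSY;	// lock missing or already taken
 */
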
/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");