ARM: OMAP2+: clockdomain: convert existing atomic usecounts into spinlock-protected shorts/ints
The atomic usecounts are confusing and no longer needed, since the
operations they are attached to must already take place under lock.
Replace the atomic counters with plain integers, protected by the
enclosing powerdomain spinlock.
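For reference, a minimal userspace sketch of the pattern being adopted
(not part of this patch; demo_domain, demo_get() and demo_put() are
illustrative names, and a pthread mutex stands in for the powerdomain
spinlock): the plain integer usecount is only read or modified while the
enclosing lock is held, with an explicit underflow check on the
decrement path.

#include <stdio.h>
#include <pthread.h>

struct demo_domain {                    /* illustrative stand-in for a clockdomain */
	pthread_mutex_t lock;           /* stands in for the powerdomain spinlock */
	int usecount;                   /* plain int: only touched while lock is held */
};

static int demo_get(struct demo_domain *d)
{
	int first;

	pthread_mutex_lock(&d->lock);
	d->usecount++;
	first = (d->usecount == 1);     /* first user: domain would be woken here */
	pthread_mutex_unlock(&d->lock);

	return first;
}

static int demo_put(struct demo_domain *d)
{
	int last;

	pthread_mutex_lock(&d->lock);
	if (d->usecount == 0) {         /* underflow guard, like the WARN_ON/-ERANGE path below */
		pthread_mutex_unlock(&d->lock);
		return -1;
	}
	d->usecount--;
	last = (d->usecount == 0);      /* last user: domain may idle again */
	pthread_mutex_unlock(&d->lock);

	return last;
}

int main(void)
{
	struct demo_domain d = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("get: first user = %d\n", demo_get(&d));
	printf("get: first user = %d\n", demo_get(&d));
	printf("put: last user  = %d\n", demo_put(&d));
	printf("put: last user  = %d\n", demo_put(&d));

	return 0;
}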
Signed-off-by: Paul Walmsley <paul@pwsan.com>
Cc: Kevin Hilman <khilman@deeprootsystems.com>
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index ab19475..2da3b5e 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -210,7 +210,8 @@
return ret;
}
- if (atomic_inc_return(&cd->wkdep_usecount) == 1) {
+ cd->wkdep_usecount++;
+ if (cd->wkdep_usecount == 1) {
pr_debug("clockdomain: hardware will wake up %s when %s wakes up\n",
clkdm1->name, clkdm2->name);
@@ -252,7 +253,8 @@
return ret;
}
- if (atomic_dec_return(&cd->wkdep_usecount) == 0) {
+ cd->wkdep_usecount--;
+ if (cd->wkdep_usecount == 0) {
pr_debug("clockdomain: hardware will no longer wake up %s after %s wakes up\n",
clkdm1->name, clkdm2->name);
@@ -296,7 +298,8 @@
return ret;
}
- if (atomic_inc_return(&cd->sleepdep_usecount) == 1) {
+ cd->sleepdep_usecount++;
+ if (cd->sleepdep_usecount == 1) {
pr_debug("clockdomain: will prevent %s from sleeping if %s is active\n",
clkdm1->name, clkdm2->name);
@@ -340,7 +343,8 @@
return ret;
}
- if (atomic_dec_return(&cd->sleepdep_usecount) == 0) {
+ cd->sleepdep_usecount--;
+ if (cd->sleepdep_usecount == 0) {
pr_debug("clockdomain: will no longer prevent %s from sleeping if %s is active\n",
clkdm1->name, clkdm2->name);
@@ -567,7 +571,21 @@
*/
int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_add_wkdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_add_wkdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -582,7 +600,21 @@
*/
int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_del_wkdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_del_wkdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -620,7 +652,7 @@
return ret;
}
- /* XXX It's faster to return the atomic wkdep_usecount */
+ /* XXX It's faster to return the wkdep_usecount */
return arch_clkdm->clkdm_read_wkdep(clkdm1, clkdm2);
}
@@ -659,7 +691,21 @@
*/
int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_add_sleepdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_add_sleepdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -676,7 +722,21 @@
*/
int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
- return _clkdm_del_sleepdep(clkdm1, clkdm2);
+ struct clkdm_dep *cd;
+ int ret;
+
+ if (!clkdm1 || !clkdm2)
+ return -EINVAL;
+
+ cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs);
+ if (IS_ERR(cd))
+ return PTR_ERR(cd);
+
+ pwrdm_lock(cd->clkdm->pwrdm.ptr);
+ ret = _clkdm_del_sleepdep(clkdm1, clkdm2);
+ pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -716,7 +776,7 @@
return ret;
}
- /* XXX It's faster to return the atomic sleepdep_usecount */
+ /* XXX It's faster to return the sleepdep_usecount */
return arch_clkdm->clkdm_read_sleepdep(clkdm1, clkdm2);
}
@@ -1063,7 +1123,8 @@
* should be called for every clock instance or hwmod that is
* enabled, so the clkdm can be force woken up.
*/
- if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
+ clkdm->usecount++;
+ if (clkdm->usecount > 1 && autodeps) {
pwrdm_unlock(clkdm->pwrdm.ptr);
return 0;
}
@@ -1125,17 +1186,17 @@
pwrdm_lock(clkdm->pwrdm.ptr);
/* corner case: disabling unused clocks */
- if ((__clk_get_enable_count(clk) == 0) &&
- (atomic_read(&clkdm->usecount) == 0))
+ if ((__clk_get_enable_count(clk) == 0) && clkdm->usecount == 0)
goto ccd_exit;
- if (atomic_read(&clkdm->usecount) == 0) {
+ if (clkdm->usecount == 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
WARN_ON(1); /* underflow */
return -ERANGE;
}
- if (atomic_dec_return(&clkdm->usecount) > 0) {
+ clkdm->usecount--;
+ if (clkdm->usecount > 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
return 0;
}
@@ -1213,13 +1274,14 @@
pwrdm_lock(clkdm->pwrdm.ptr);
- if (atomic_read(&clkdm->usecount) == 0) {
+ if (clkdm->usecount == 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
WARN_ON(1); /* underflow */
return -ERANGE;
}
- if (atomic_dec_return(&clkdm->usecount) > 0) {
+ clkdm->usecount--;
+ if (clkdm->usecount > 0) {
pwrdm_unlock(clkdm->pwrdm.ptr);
return 0;
}