msm: kgsl: Refcount mmu clock calls and allow multiple disable events
Allow each mmu clock enable call to enable the clock and let the
clock driver's refcounting mechanism deal with switching the clock
off at the right time. This also means that each enable call must be
paired with a disable call or a registered disable event, so allow
multiple disable events to be registered. This simplifies the mmu
clock handling mechanism, which helps the multiple ringbuffer
implementation: with multiple ringbuffers each global timestamp is
tied to a specific ringbuffer, and with only a single clock disable
event it is hard to figure out when the clock should be turned off.
CRs-fixed: 597193 592485
Change-Id: I924c545140bd19d95890a5fdfd0382c511abe973
Signed-off-by: Shubhraprakash Das <sadas@codeaurora.org>
Signed-off-by: Tarun Karra <tkarra@codeaurora.org>
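
Note: the change amounts to the pattern sketched below. Every enable call
bumps a per-context refcount, every disable call (whether immediate or
fired from a retired-timestamp event) drops it, and each pending disable
event carries its own parameter block so several events can be queued at
once. The sketch is illustrative only: it is standalone userspace C using
assumed names (ctx_clk, disable_event, ctx_clk_enable/ctx_clk_disable,
disable_event_retire) and is not the kgsl, msm-iommu, or clk API.

/* Illustrative sketch only; the names here are made up, not kgsl/clk APIs. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Models one IOMMU context's clock state. */
struct ctx_clk {
	int refcount;		/* outstanding enable calls */
	bool hw_on;		/* stands in for the real clock gate */
};

static void ctx_clk_enable(struct ctx_clk *clk)
{
	if (clk->refcount++ == 0)
		clk->hw_on = true;	/* first user powers the clock */
}

static void ctx_clk_disable(struct ctx_clk *clk)
{
	assert(clk->refcount > 0);	/* same intent as the BUG_ON in the patch */
	if (--clk->refcount == 0)
		clk->hw_on = false;	/* last user gates the clock */
}

/* One disable event per submission; many may be pending at a time. */
struct disable_event {
	struct ctx_clk *clk;
	unsigned int ts;	/* timestamp the event waits for */
};

static void disable_event_retire(struct disable_event *ev,
				unsigned int retired_ts)
{
	/* Fire once the tracked timestamp has retired (wraparound-safe). */
	if ((int)(retired_ts - ev->ts) >= 0)
		ctx_clk_disable(ev->clk);
}

int main(void)
{
	struct ctx_clk clk = { 0, false };
	struct disable_event ev_a = { &clk, 10 };
	struct disable_event ev_b = { &clk, 20 };

	ctx_clk_enable(&clk);	/* submission A */
	ctx_clk_enable(&clk);	/* submission B */

	disable_event_retire(&ev_a, 10);
	printf("after ts 10: on=%d refcount=%d\n", clk.hw_on, clk.refcount);

	disable_event_retire(&ev_b, 20);
	printf("after ts 20: on=%d refcount=%d\n", clk.hw_on, clk.refcount);
	return 0;
}

Because each event owns its own kgsl_iommu_disable_clk_param, the driver no
longer needs the single clk_event_queued/iommu_last_cmd_ts pair that the old
code used to coalesce disable requests, which is what makes per-ringbuffer
timestamps workable.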
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index eba60ea..0812b0a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1205,9 +1205,11 @@
* after the command has been retired
*/
if (result)
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ kgsl_mmu_disable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
else
- kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
+ KGSL_IOMMU_CONTEXT_USER);
done:
kgsl_context_put(context);
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 8f65705..7a885a4 100755
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -461,7 +461,7 @@
* Disables iommu clocks
* Return - void
*/
-static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
{
struct kgsl_iommu *iommu = mmu->priv;
struct msm_iommu_drvdata *iommu_drvdata;
@@ -470,8 +470,15 @@
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
- if (!iommu_unit->dev[j].clk_enabled)
+ if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
+ atomic_dec(&iommu_unit->dev[j].clk_enable_count);
+ BUG_ON(
+ atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0);
+ /*
+ * the clock calls have a refcount so call them on every
+ * enable/disable call
+ */
iommu_drvdata = dev_get_drvdata(
iommu_unit->dev[j].dev->parent);
if (iommu_drvdata->aclk)
@@ -479,7 +486,6 @@
if (iommu_drvdata->clk)
clk_disable_unprepare(iommu_drvdata->clk);
clk_disable_unprepare(iommu_drvdata->pclk);
- iommu_unit->dev[j].clk_enabled = false;
}
}
}
@@ -500,32 +506,14 @@
unsigned int id, unsigned int ts,
u32 type)
{
- struct kgsl_mmu *mmu = data;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_disable_clk_param *param = data;
- if (!iommu->clk_event_queued) {
- if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
- KGSL_DRV_ERR(device,
- "IOMMU disable clock event being cancelled, "
- "iommu_last_cmd_ts: %x, retired ts: %x\n",
- iommu->iommu_last_cmd_ts, ts);
- return;
- }
-
- if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
- kgsl_iommu_disable_clk(mmu);
- iommu->clk_event_queued = false;
- } else {
- /* add new event to fire when ts is reached, this can happen
- * if we queued an event and someone requested the clocks to
- * be disbaled on a later timestamp */
- if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
- kgsl_iommu_clk_disable_event, mmu, mmu)) {
- KGSL_DRV_ERR(device,
- "Failed to add IOMMU disable clk event\n");
- iommu->clk_event_queued = false;
- }
- }
+ if ((0 <= timestamp_cmp(ts, param->ts)) ||
+ (KGSL_EVENT_CANCELLED == type))
+ kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
+ else
+ /* something went wrong with the event handling mechanism */
+ BUG_ON(1);
}
/*
@@ -535,6 +523,8 @@
* @ts_valid - Indicates whether ts parameter is valid, if this parameter
* is false then it means that the calling function wants to disable the
* IOMMU clocks immediately without waiting for any timestamp
+ * @ctx_id: Context id of the IOMMU context for which clocks are to be
+ * turned off
*
* Creates an event to disable the IOMMU clocks on timestamp and if event
* already exists then updates the timestamp of disabling the IOMMU clocks
@@ -543,28 +533,25 @@
* Return - void
*/
static void
-kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
- bool ts_valid)
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, int ctx_id)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_disable_clk_param *param;
- if (iommu->clk_event_queued) {
- if (ts_valid && (0 <
- timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
- iommu->iommu_last_cmd_ts = ts;
- } else {
- if (ts_valid) {
- iommu->iommu_last_cmd_ts = ts;
- iommu->clk_event_queued = true;
- if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
- ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
- KGSL_DRV_ERR(mmu->device,
- "Failed to add IOMMU disable clk event\n");
- iommu->clk_event_queued = false;
- }
- } else {
- kgsl_iommu_disable_clk(mmu);
- }
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*param));
+ return;
+ }
+ param->mmu = mmu;
+ param->ctx_id = ctx_id;
+ param->ts = ts;
+
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, param, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ kfree(param);
}
}
@@ -587,8 +574,7 @@
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
- if (iommu_unit->dev[j].clk_enabled ||
- ctx_id != iommu_unit->dev[j].ctx_id)
+ if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
iommu_drvdata =
dev_get_drvdata(iommu_unit->dev[j].dev->parent);
@@ -614,12 +600,25 @@
goto done;
}
}
- iommu_unit->dev[j].clk_enabled = true;
+ atomic_inc(&iommu_unit->dev[j].clk_enable_count);
}
}
done:
- if (ret)
- kgsl_iommu_disable_clk(mmu);
+ if (ret) {
+ struct kgsl_iommu_unit *iommu_unit;
+ if (iommu->unit_count == i)
+ i--;
+ iommu_unit = &iommu->iommu_units[i];
+ do {
+ for (j--; j >= 0; j--)
+ kgsl_iommu_disable_clk(mmu, ctx_id);
+ i--;
+ if (i >= 0) {
+ iommu_unit = &iommu->iommu_units[i];
+ j = iommu_unit->dev_count;
+ }
+ } while (i >= 0);
+ }
return ret;
}
@@ -848,6 +847,9 @@
ret = -EINVAL;
goto done;
}
+ atomic_set(
+ &(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
+ 0);
iommu_unit->dev[iommu_unit->dev_count].dev =
msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -1674,6 +1676,7 @@
}
status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
if (status) {
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
KGSL_CORE_ERR("clk enable failed\n");
goto done;
}
@@ -1728,14 +1731,11 @@
KGSL_IOMMU_SETSTATE_NOP_OFFSET,
cp_nop_packet(1), sizeof(unsigned int));
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
- if (status) {
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
- kgsl_detach_pagetable_iommu_domain(mmu);
- }
return status;
}
@@ -1858,6 +1858,7 @@
iommu_unit,
iommu_unit->dev[j].ctx_id,
FSR, 0);
+ kgsl_iommu_disable_clk(mmu, j);
_iommu_unlock(iommu);
iommu_unit->dev[j].fault = 0;
}
@@ -1870,7 +1871,6 @@
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
/*
* stop device mmu
*
@@ -1886,9 +1886,7 @@
kgsl_iommu_pagefault_resume(mmu);
}
/* switch off MMU clocks and cancel any events it has queued */
- iommu->clk_event_queued = false;
kgsl_cancel_events(mmu->device, mmu);
- kgsl_iommu_disable_clk(mmu);
}
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
@@ -1938,10 +1936,10 @@
return 0;
/* Return the current pt base by reading IOMMU pt_base register */
kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]),
- KGSL_IOMMU_CONTEXT_USER,
- TTBR0);
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ pt_base = KGSL_IOMMU_GET_CTX_REG(iommu,
+ (&iommu->iommu_units[0]),
+ KGSL_IOMMU_CONTEXT_USER, TTBR0);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
@@ -1969,7 +1967,6 @@
phys_addr_t pt_val;
ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
-
if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
return ret;
@@ -2056,7 +2053,7 @@
_iommu_unlock(iommu);
/* Disable smmu clock */
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return ret;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 7dca40e..3878107 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,6 +143,7 @@
* are on, else the clocks are off
* fault: Flag when set indicates that this iommu device has caused a page
* fault
+ * @clk_enable_count: The ref count of clock enable calls
*/
struct kgsl_iommu_device {
struct device *dev;
@@ -152,6 +153,7 @@
bool clk_enabled;
struct kgsl_device *kgsldev;
int fault;
+ atomic_t clk_enable_count;
};
/*
@@ -182,10 +184,6 @@
* iommu contexts owned by graphics cores
* @unit_count: Number of IOMMU units that are available for this
* instance of the IOMMU driver
- * @iommu_last_cmd_ts: The timestamp of last command submitted that
- * aceeses iommu registers
- * @clk_event_queued: Indicates whether an event to disable clocks
- * is already queued or not
* @device: Pointer to kgsl device
* @ctx_offset: The context offset to be added to base address when
* accessing IOMMU registers
@@ -201,8 +199,6 @@
struct kgsl_iommu {
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
unsigned int unit_count;
- unsigned int iommu_last_cmd_ts;
- bool clk_event_queued;
struct kgsl_device *device;
unsigned int ctx_offset;
struct kgsl_iommu_register_list *iommu_reg_list;
@@ -222,4 +218,18 @@
struct kgsl_iommu *iommu;
};
+/*
+ * struct kgsl_iommu_disable_clk_param - Parameter struct for disable clk event
+ * @mmu: The mmu pointer
+ * @rb_level: The ringbuffer level to which the timestamp of the event belongs
+ * @ctx_id: The IOMMU context whose clock is to be turned off
+ * @ts: Timestamp on which clock is to be disabled
+ */
+struct kgsl_iommu_disable_clk_param {
+ struct kgsl_mmu *mmu;
+ int rb_level;
+ int ctx_id;
+ unsigned int ts;
+};
+
#endif
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 8bc9962..5c977d7 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -144,11 +144,12 @@
void (*mmu_pagefault_resume)
(struct kgsl_mmu *mmu);
void (*mmu_disable_clk_on_ts)
- (struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
+ (struct kgsl_mmu *mmu,
+ uint32_t ts, int ctx_id);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
void (*mmu_disable_clk)
- (struct kgsl_mmu *mmu);
+ (struct kgsl_mmu *mmu, int ctx_id);
phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -326,17 +327,18 @@
return 0;
}
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
- mmu->mmu_ops->mmu_disable_clk(mmu);
+ mmu->mmu_ops->mmu_disable_clk(mmu, ctx_id);
}
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
- unsigned int ts, bool ts_valid)
+ unsigned int ts,
+ int ctx_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
- mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ctx_id);
}
static inline unsigned int kgsl_mmu_get_int_mask(void)
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 96ff1b8..c00e978 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1430,8 +1430,6 @@
break;
}
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
-
return 0;
}