Merge "msm: kgsl: Add return value for perfcounter enable function"
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9b45432..8a92d75 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -224,32 +224,47 @@
* performance counters will remain active as long as the device is alive.
*/
-static void adreno_perfcounter_init(struct kgsl_device *device)
+static int adreno_perfcounter_init(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (adreno_dev->gpudev->perfcounter_init)
- adreno_dev->gpudev->perfcounter_init(adreno_dev);
+ return adreno_dev->gpudev->perfcounter_init(adreno_dev);
+ return 0;
};
/**
+ * adreno_perfcounter_close: Release counters initialized by
+ * adreno_perfcounter_init
+ * @device: device to release counters for
+ *
+ */
+static void adreno_perfcounter_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ if (adreno_dev->gpudev->perfcounter_close)
+ adreno_dev->gpudev->perfcounter_close(adreno_dev);
+}
+
+/**
* adreno_perfcounter_start: Enable performance counters
* @adreno_dev: Adreno device to configure
*
* Ensure all performance counters are enabled that are allocated. Since
* the device was most likely stopped, we can't trust that the counters
* are still valid so make it so.
+ * Returns 0 on success, else an error code
*/
-static void adreno_perfcounter_start(struct adreno_device *adreno_dev)
+static int adreno_perfcounter_start(struct adreno_device *adreno_dev)
{
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
struct adreno_perfcount_group *group;
unsigned int i, j;
+ int ret = 0;
- /* perfcounter start does nothing on a2xx */
- if (adreno_is_a2xx(adreno_dev))
- return;
+ if (NULL == counters)
+ return 0;
/* group id iter */
for (i = 0; i < counters->group_count; i++) {
@@ -264,11 +279,15 @@
continue;
if (adreno_dev->gpudev->perfcounter_enable)
- adreno_dev->gpudev->perfcounter_enable(
+ ret = adreno_dev->gpudev->perfcounter_enable(
adreno_dev, i, j,
group->regs[j].countable);
+ if (ret)
+ goto done;
}
}
+done:
+ return ret;
}
/**
@@ -290,8 +309,7 @@
unsigned int i, j;
int ret = 0;
- /* perfcounter get/put/query/read not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
+ if (NULL == counters)
return -EINVAL;
/* sanity check for later */
@@ -366,8 +384,7 @@
if (name == NULL)
return -EINVAL;
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
+ if (NULL == counters)
return -EINVAL;
for (i = 0; i < counters->group_count; ++i) {
@@ -392,8 +409,7 @@
{
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
+ if (NULL == counters)
return NULL;
if (groupid >= counters->group_count)
@@ -423,8 +439,7 @@
*max_counters = 0;
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
+ if (NULL == counters)
return -EINVAL;
if (groupid >= counters->group_count)
@@ -473,13 +488,13 @@
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
struct adreno_perfcount_group *group;
unsigned int i, empty = -1;
+ int ret = 0;
/* always clear return variables */
if (offset)
*offset = 0;
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
+ if (NULL == counters)
return -EINVAL;
if (groupid >= counters->group_count)
@@ -514,6 +529,11 @@
if (empty == -1)
return -EBUSY;
+ /* enable the new counter */
+ ret = adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
+ countable);
+ if (ret)
+ return ret;
/* initialize the new counter */
group->regs[empty].countable = countable;
@@ -526,14 +546,10 @@
group->regs[empty].usercount = 1;
}
- /* enable the new counter */
- adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
- countable);
-
if (offset)
*offset = group->regs[empty].offset;
- return 0;
+ return ret;
}
@@ -555,8 +571,7 @@
unsigned int i;
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
+ if (NULL == counters)
return -EINVAL;
if (groupid >= counters->group_count)
@@ -1638,6 +1653,7 @@
adreno_dispatcher_close(adreno_dev);
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+ adreno_perfcounter_close(device);
kgsl_device_platform_remove(device);
clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
@@ -1649,6 +1665,7 @@
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
+ int ret;
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/*
@@ -1710,7 +1727,9 @@
for (i = 6; i < FT_DETECT_REGS_COUNT; i++)
ft_detect_regs[i] = 0;
- adreno_perfcounter_init(device);
+ ret = adreno_perfcounter_init(device);
+ if (ret)
+ goto done;
/* Power down the device */
kgsl_pwrctrl_disable(device);
@@ -1720,8 +1739,8 @@
adreno_a3xx_pwron_fixup_init(adreno_dev);
set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
-
- return 0;
+done:
+ return ret;
}
static int adreno_start(struct kgsl_device *device)
@@ -1790,7 +1809,9 @@
if (status)
goto error_irq_off;
- adreno_perfcounter_start(adreno_dev);
+ status = adreno_perfcounter_start(adreno_dev);
+ if (status)
+ goto error_rb_stop;
/* Start the dispatcher */
adreno_dispatcher_start(adreno_dev);
@@ -1799,6 +1820,8 @@
return 0;
+error_rb_stop:
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
error_irq_off:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 6fe3027..89f81be 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -345,10 +345,11 @@
unsigned int (*irq_pending)(struct adreno_device *);
void * (*snapshot)(struct adreno_device *, void *, int *, int);
int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
- void (*perfcounter_init)(struct adreno_device *);
+ int (*perfcounter_init)(struct adreno_device *);
+ void (*perfcounter_close)(struct adreno_device *);
void (*start)(struct adreno_device *);
unsigned int (*busy_cycles)(struct adreno_device *);
- void (*perfcounter_enable)(struct adreno_device *, unsigned int group,
+ int (*perfcounter_enable)(struct adreno_device *, unsigned int group,
unsigned int counter, unsigned int countable);
uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 2cbc877..ada7151 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -3131,41 +3131,41 @@
}
-static void a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
- unsigned int countable)
+static int a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
+ unsigned int counter)
{
unsigned int in, out;
- if (countable > 1)
- return;
+ if (counter > 1)
+ return -EINVAL;
kgsl_regread(device, A3XX_RBBM_RBBM_CTL, &in);
- if (countable == 0)
+ if (counter == 0)
out = in | RBBM_RBBM_CTL_RESET_PWR_CTR0;
else
out = in | RBBM_RBBM_CTL_RESET_PWR_CTR1;
kgsl_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
- if (countable == 0)
+ if (counter == 0)
out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR0;
else
out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
kgsl_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
- return;
+ return 0;
}
-static void a3xx_perfcounter_enable_vbif(struct kgsl_device *device,
+static int a3xx_perfcounter_enable_vbif(struct kgsl_device *device,
unsigned int counter,
unsigned int countable)
{
unsigned int in, out, bit, sel;
if (counter > 1 || countable > 0x7f)
- return;
+ return -EINVAL;
kgsl_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
kgsl_regread(device, A3XX_VBIF_PERF_CNT_SEL, &sel);
@@ -3187,20 +3187,21 @@
kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+ return 0;
}
-static void a3xx_perfcounter_enable_vbif_pwr(struct kgsl_device *device,
- unsigned int countable)
+static int a3xx_perfcounter_enable_vbif_pwr(struct kgsl_device *device,
+ unsigned int counter)
{
unsigned int in, out, bit;
- if (countable > 2)
- return;
+ if (counter > 2)
+ return -EINVAL;
kgsl_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
- if (countable == 0)
+ if (counter == 0)
bit = VBIF_PERF_PWR_CNT_0;
- else if (countable == 1)
+ else if (counter == 1)
bit = VBIF_PERF_PWR_CNT_1;
else
bit = VBIF_PERF_PWR_CNT_2;
@@ -3211,6 +3212,7 @@
kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+ return 0;
}
/*
@@ -3221,9 +3223,10 @@
* @countable - Desired countable
*
* Physically set up a counter within a group with the desired countable
+ * Returns 0 on success, else an error code
*/
-static void a3xx_perfcounter_enable(struct adreno_device *adreno_dev,
+static int a3xx_perfcounter_enable(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter, unsigned int countable)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -3232,18 +3235,20 @@
/* Special cases */
if (group == KGSL_PERFCOUNTER_GROUP_PWR)
- return a3xx_perfcounter_enable_pwr(device, countable);
+ return a3xx_perfcounter_enable_pwr(device, counter);
else if (group == KGSL_PERFCOUNTER_GROUP_VBIF)
- return a3xx_perfcounter_enable_vbif(device, counter, countable);
+ return a3xx_perfcounter_enable_vbif(device, counter,
+ countable);
else if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR)
- return a3xx_perfcounter_enable_vbif_pwr(device, countable);
+ return a3xx_perfcounter_enable_vbif_pwr(device, counter);
if (group >= adreno_dev->gpudev->perfcounters->group_count)
- return;
+ return -EINVAL;
- if (counter >=
- adreno_dev->gpudev->perfcounters->groups[group].reg_count)
- return;
+ if ((0 == adreno_dev->gpudev->perfcounters->groups[group].reg_count) ||
+ (counter >=
+ adreno_dev->gpudev->perfcounters->groups[group].reg_count))
+ return -EINVAL;
reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
@@ -3251,12 +3256,15 @@
kgsl_regwrite(device, reg->select, countable);
if (reg->load_bit < 32) {
- val = 1 << reg->load_bit;
+ kgsl_regread(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, &val);
+ val |= (1 << reg->load_bit);
kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val);
} else {
- val = 1 << (reg->load_bit - 32);
+ kgsl_regread(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, &val);
+ val |= (1 << (reg->load_bit - 32));
kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val);
}
+ return 0;
}
static uint64_t a3xx_perfcounter_read_pwr(struct adreno_device *adreno_dev,
@@ -3367,8 +3375,9 @@
if (group >= adreno_dev->gpudev->perfcounters->group_count)
return 0;
- if (counter >=
- adreno_dev->gpudev->perfcounters->groups[group].reg_count)
+ if ((0 == adreno_dev->gpudev->perfcounters->groups[group].reg_count) ||
+ (counter >=
+ adreno_dev->gpudev->perfcounters->groups[group].reg_count))
return 0;
reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
@@ -3803,8 +3812,30 @@
ARRAY_SIZE(a3xx_perfcounter_groups),
};
-static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
+/*
+ * a3xx_perfcounter_close() - Release counters that were initialized in
+ * a3xx_perfcounter_init
+ * @adreno_dev: The device for which counters were initialized
+ */
+static void a3xx_perfcounter_close(struct adreno_device *adreno_dev)
{
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_FULL_ALU_INSTRUCTIONS,
+ PERFCOUNTER_FLAG_KERNEL);
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_CFLOW_INSTRUCTIONS,
+ PERFCOUNTER_FLAG_KERNEL);
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP0_ICL1_MISSES,
+ PERFCOUNTER_FLAG_KERNEL);
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_ALU_ACTIVE_CYCLES,
+ PERFCOUNTER_FLAG_KERNEL);
+}
+
+static int a3xx_perfcounter_init(struct adreno_device *adreno_dev)
+{
+ int ret;
/* SP[3] counter is broken on a330 so disable it if a330 device */
if (adreno_is_a330(adreno_dev))
a3xx_perfcounters_sp[3].countable = KGSL_PERFCOUNTER_BROKEN;
@@ -3819,29 +3850,47 @@
* we will use this to augment our hang detection
*/
if (adreno_dev->fast_hang_detect) {
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_SP,
SP_ALU_ACTIVE_CYCLES, &ft_detect_regs[6],
PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
ft_detect_regs[7] = ft_detect_regs[6] + 1;
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_SP,
SP0_ICL1_MISSES, &ft_detect_regs[8],
PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
ft_detect_regs[9] = ft_detect_regs[8] + 1;
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_SP,
SP_FS_CFLOW_INSTRUCTIONS, &ft_detect_regs[10],
PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
ft_detect_regs[11] = ft_detect_regs[10] + 1;
}
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
SP_FS_FULL_ALU_INSTRUCTIONS, NULL, PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
/* Reserve and start countable 1 in the PWR perfcounter group */
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
+ ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
NULL, PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
/* Default performance counter profiling to false */
adreno_dev->profile.enabled = false;
+ return ret;
+
+err:
+ a3xx_perfcounter_close(adreno_dev);
+ return ret;
}
/**
@@ -4318,6 +4367,7 @@
.ctxt_draw_workaround = NULL,
.rb_init = a3xx_rb_init,
.perfcounter_init = a3xx_perfcounter_init,
+ .perfcounter_close = a3xx_perfcounter_close,
.irq_control = a3xx_irq_control,
.irq_handler = a3xx_irq_handler,
.irq_pending = a3xx_irq_pending,
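
A side note on the RBBM_PERFCTR_LOAD_CMD0/1 change in
a3xx_perfcounter_enable(): the old code wrote "1 << load_bit" straight
into the register, which dropped any load bits already set for other
counters; the patch reads the register back and ORs the new bit in.
A hedged, self-contained illustration of that read-modify-write pattern
(plain C; the variable below simply stands in for the register accessed
via kgsl_regread()/kgsl_regwrite()):

#include <stdio.h>

static unsigned int load_cmd0;	/* stand-in for A3XX_RBBM_PERFCTR_LOAD_CMD0 */

static void enable_counter_rmw(unsigned int load_bit)
{
	unsigned int val;

	val = load_cmd0;		/* kgsl_regread()  */
	val |= 1u << load_bit;		/* keep bits set by earlier enables */
	load_cmd0 = val;		/* kgsl_regwrite() */
}

int main(void)
{
	enable_counter_rmw(3);
	enable_counter_rmw(7);
	/* both load requests survive: prints LOAD_CMD0 = 0x88 */
	printf("LOAD_CMD0 = 0x%x\n", load_cmd0);
	return 0;
}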