pmem: map and unmap the SMI region on demand

Due to hardware constraints, the SMI should not be mapped into kernel
space while it is not in use. Map the SMI when the first allocation is
made and unmap it from the kernel address space when the last
allocation is freed. Also vote for bus bandwidth while the SMI is
allocated.
CRs-Fixed: 281723
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
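---
The core of the change is a per-device allocation reference count:
pmem_get_region() runs on the 0 -> 1 transition (calling the board's
request_region hook and ioremapping the region), and pmem_put_region()
runs on the 1 -> 0 transition (iounmapping the region and calling the
release_region hook when map_on_demand is set).

A minimal sketch of the resulting allocate/free flow, using only names
from this patch (callers simplified; the real call sites are
pmem_mmap() and the allocation ioctls):

	/* first allocation: request_region() votes for bus bandwidth
	 * and the SMI is ioremapped before the allocator runs */
	mutex_lock(&pmem[id].arena_mutex);
	index = pmem_allocate_from_id(id, size, SZ_4K);
	mutex_unlock(&pmem[id].arena_mutex);

	/* last free: the SMI is iounmapped and release_region() drops
	 * the bandwidth vote */
	mutex_lock(&pmem[id].arena_mutex);
	pmem_free_from_id(id, index);
	mutex_unlock(&pmem[id].arena_mutex);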
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 8f42c25..4333cfb 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -2708,11 +2708,54 @@
.dev = { .platform_data = &android_pmem_audio_pdata },
};
+#define PMEM_BUS_WIDTH(_bw) \
+ { \
+ .vectors = &(struct msm_bus_vectors){ \
+ .src = MSM_BUS_MASTER_AMPSS_M0, \
+ .dst = MSM_BUS_SLAVE_SMI, \
+ .ib = (_bw), \
+ .ab = 0, \
+ }, \
+ .num_paths = 1, \
+ }
+static struct msm_bus_paths pmem_smi_table[] = {
+ [0] = PMEM_BUS_WIDTH(0), /* Off */
+ [1] = PMEM_BUS_WIDTH(1), /* On */
+};
+
+static struct msm_bus_scale_pdata smi_client_pdata = {
+ .usecase = pmem_smi_table,
+ .num_usecases = ARRAY_SIZE(pmem_smi_table),
+ .name = "pmem_smi",
+};
+
+void pmem_request_smi_region(void *data)
+{
+ int bus_id = (int) data;
+
+ msm_bus_scale_client_update_request(bus_id, 1);
+}
+
+void pmem_release_smi_region(void *data)
+{
+ int bus_id = (int) data;
+
+ msm_bus_scale_client_update_request(bus_id, 0);
+}
+
+void *pmem_setup_smi_region(void)
+{
+ return (void *)msm_bus_scale_register_client(&smi_client_pdata);
+}
static struct android_pmem_platform_data android_pmem_smipool_pdata = {
.name = "pmem_smipool",
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
.cached = 0,
.memory_type = MEMTYPE_SMI,
+ .request_region = pmem_request_smi_region,
+ .release_region = pmem_release_smi_region,
+ .setup_region = pmem_setup_smi_region,
+ .map_on_demand = 1,
};
static struct platform_device android_pmem_smipool_device = {
.name = "android_pmem",
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
index 5063551..df36ad5 100644
--- a/drivers/misc/pmem.c
+++ b/drivers/misc/pmem.c
@@ -216,9 +216,33 @@
long (*ioctl)(struct file *, unsigned int, unsigned long);
int (*release)(struct inode *, struct file *);
+ /* reference count of allocations */
+ atomic_t allocation_cnt;
+ /*
+ * request function for a region when the allocation count goes
+ * from 0 -> 1
+ */
+ void (*mem_request)(void *);
+ /*
+ * release function for a region when the allocation count goes
+ * from 1 -> 0
+ */
+ void (*mem_release)(void *);
+ /*
+ * private data for the request/release callback
+ */
+ void *region_data;
+ /*
+ * map and unmap as needed
+ */
+ int map_on_demand;
};
#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)
+static void ioremap_pmem(int id);
+static void pmem_put_region(int id);
+static int pmem_get_region(int id);
+
static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;
@@ -522,6 +546,66 @@
.default_attrs = pmem_system_attrs,
};
+static int pmem_allocate_from_id(const int id, const unsigned long size,
+ const unsigned int align)
+{
+ int ret;
+ ret = pmem_get_region(id);
+
+ if (ret)
+ return -1;
+
+ ret = pmem[id].allocate(id, size, align);
+
+ if (ret < 0)
+ pmem_put_region(id);
+
+ return ret;
+}
+
+static int pmem_free_from_id(const int id, const int index)
+{
+ pmem_put_region(id);
+ return pmem[id].free(id, index);
+}
+
+static int pmem_get_region(int id)
+{
+ /* Must be called with arena mutex locked */
+ atomic_inc(&pmem[id].allocation_cnt);
+ if (!pmem[id].vbase) {
+ DLOG("PMEMDEBUG: mapping for %s", pmem[id].name);
+ if (pmem[id].mem_request)
+ pmem[id].mem_request(pmem[id].region_data);
+ ioremap_pmem(id);
+ }
+
+ if (pmem[id].vbase) {
+ return 0;
+ } else {
+ if (pmem[id].mem_release)
+ pmem[id].mem_release(pmem[id].region_data);
+ atomic_dec(&pmem[id].allocation_cnt);
+ return 1;
+ }
+}
+
+static void pmem_put_region(int id)
+{
+ /* Must be called with arena mutex locked */
+ if (atomic_dec_and_test(&pmem[id].allocation_cnt)) {
+ DLOG("PMEMDEBUG: unmapping for %s", pmem[id].name);
+ BUG_ON(!pmem[id].vbase);
+ if (pmem[id].map_on_demand) {
+ iounmap(pmem[id].vbase);
+ pmem[id].vbase = NULL;
+ if (pmem[id].mem_release)
+ pmem[id].mem_release(pmem[id].region_data);
+
+ }
+ }
+}
+
static int get_id(struct file *file)
{
return MINOR(file->f_dentry->d_inode->i_rdev);
@@ -840,7 +924,7 @@
/* if it is not a connected file and it has an allocation, free it */
if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
mutex_lock(&pmem[id].arena_mutex);
- ret = pmem[id].free(id, data->index);
+ ret = pmem_free_from_id(id, data->index);
mutex_unlock(&pmem[id].arena_mutex);
}
@@ -1516,7 +1600,7 @@
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pmem_data *data = file->private_data;
- int index;
+ int index = -1;
unsigned long vma_size = vma->vm_end - vma->vm_start;
int ret = 0, id = get_id(file);
#if PMEM_DEBUG_MSGS
@@ -1553,7 +1637,7 @@
/* if file->private_data == unalloced, alloc*/
if (data->index == -1) {
mutex_lock(&pmem[id].arena_mutex);
- index = pmem[id].allocate(id,
+ index = pmem_allocate_from_id(id,
vma->vm_end - vma->vm_start,
SZ_4K);
mutex_unlock(&pmem[id].arena_mutex);
@@ -2353,7 +2437,7 @@
}
mutex_lock(&pmem[id].arena_mutex);
- data->index = pmem[id].allocate(id,
+ data->index = pmem_allocate_from_id(id,
arg,
SZ_4K);
mutex_unlock(&pmem[id].arena_mutex);
@@ -2400,9 +2484,9 @@
}
mutex_lock(&pmem[id].arena_mutex);
- data->index = pmem[id].allocate(id,
- alloc.size,
- alloc.align);
+ data->index = pmem_allocate_from_id(id,
+ alloc.size,
+ alloc.align);
mutex_unlock(&pmem[id].arena_mutex);
ret = data->index == -1 ? -ENOMEM :
data->index;
@@ -2436,6 +2520,8 @@
static void ioremap_pmem(int id)
{
+	DLOG("PMEMDEBUG: ioremapping for %s\n", pmem[id].name);
+
if (pmem[id].cached)
pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
#ifdef ioremap_ext_buffered
@@ -2653,24 +2739,24 @@
pmem[id].base = allocate_contiguous_memory_nomap(pmem[id].size,
pmem[id].memory_type, PAGE_SIZE);
- if (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM) {
- ioremap_pmem(id);
- if (pmem[id].vbase == 0) {
- pr_err("pmem: ioremap failed for device %s\n",
- pmem[id].name);
- goto error_cant_remap;
- }
- }
-
pr_info("allocating %lu bytes at %p (%lx physical) for %s\n",
pmem[id].size, pmem[id].vbase, pmem[id].base, pmem[id].name);
pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
+ atomic_set(&pmem[id].allocation_cnt, 0);
+ pmem[id].map_on_demand = pdata->map_on_demand;
+
+ if (pdata->setup_region)
+ pmem[id].region_data = pdata->setup_region();
+
+ if (pdata->request_region)
+ pmem[id].mem_request = pdata->request_region;
+
+ if (pdata->release_region)
+ pmem[id].mem_release = pdata->release_region;
return 0;
-error_cant_remap:
- misc_deregister(&pmem[id].dev);
err_cant_register_device:
out_put_kobj:
kobject_put(&pmem[id].kobj);
diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h
index 91f8f1b..c029196 100644
--- a/include/linux/android_pmem.h
+++ b/include/linux/android_pmem.h
@@ -153,6 +153,24 @@
unsigned buffered;
/* which memory type (i.e. SMI, EBI1) this PMEM device is backed by */
unsigned memory_type;
+ /*
+ * function to be called when the number of allocations goes from
+ * 0 -> 1
+ */
+ void (*request_region)(void *);
+ /*
+ * function to be called when the number of allocations goes from
+ * 1 -> 0
+ */
+ void (*release_region)(void *);
+ /*
+ * function to be called upon pmem registration
+ */
+ void *(*setup_region)(void);
+ /*
+	 * indicates that this region should be mapped/unmapped as needed
+ */
+ int map_on_demand;
};
int pmem_setup(struct android_pmem_platform_data *pdata,
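
For reference, a board enables the new behaviour by filling in these
fields, as the msm8x60 board file above does for pmem_smipool. A
hypothetical minimal wiring (only the struct fields come from this
patch; the example_* names and empty hooks are illustrative):

	static void *example_setup_region(void)
	{
		/* e.g. register a bus-scaling client and return it */
		return NULL;
	}

	static void example_request_region(void *data)
	{
		/* e.g. vote for bus bandwidth on the first allocation */
	}

	static void example_release_region(void *data)
	{
		/* e.g. drop the vote after the last free */
	}

	static struct android_pmem_platform_data example_pdata = {
		.name = "pmem_example",
		.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
		.cached = 0,
		.memory_type = MEMTYPE_SMI,
		.setup_region = example_setup_region,
		.request_region = example_request_region,
		.release_region = example_release_region,
		.map_on_demand = 1,
	};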