msm: ocmem: Add support for low power clients

Add support for low power clients to map and unmap OCMEM buffers.
These operations trigger RDM transfers using the BR/DM core, which
allows low power clients to migrate segments between main memory and
OCMEM.

Also add the APIs required to support eviction and restoration of
buffers.
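
For illustration, a low power client might drive a map request as
follows (a sketch only: client_id, buf and the segment parameters are
placeholders, and client code is not part of this patch):

	struct ocmem_map_list list;

	/* Each chunk must be at least MIN_CHUNK_SIZE bytes and a
	 * multiple of MIN_CHUNK_SIZE, per pre_validate_chunk_list() */
	list.num_chunks = 1;
	list.chunks[0].ro = false;
	list.chunks[0].ddr_paddr = segment_paddr;
	list.chunks[0].size = segment_size;

	/* Queues an asynchronous RDM transfer into OCMEM; the result
	 * arrives as an OCMEM_MAP_DONE/OCMEM_MAP_FAIL notification */
	if (ocmem_map(client_id, buf, &list))
		pr_err("ocmem map request was rejected\n");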
Change-Id: I6b641510f8ef27da2b347d13af468a1ed6664c9e
Signed-off-by: Naveen Ramaraj <nramaraj@codeaurora.org>
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 59e285b..4c020e3 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -339,7 +339,7 @@
obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
endif
obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o ocmem_notifier.o
-obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o
+obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o
obj-$(CONFIG_ARCH_MSM7X27) += gpiomux-7x27.o gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index 927624d..415f8ed 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -22,7 +22,7 @@
/* Maximum number of slots in DM */
#define OCMEM_MAX_CHUNKS 32
-#define MIN_CHUNK_SIZE (SZ_1K/8)
+#define MIN_CHUNK_SIZE 128
struct ocmem_notifier;
@@ -61,7 +61,7 @@
/* IMEM Clients */
OCMEM_LP_AUDIO,
OCMEM_SENSORS,
- OCMEM_BLAST,
+ OCMEM_OTHER_OS,
OCMEM_CLIENT_MAX,
};
@@ -108,8 +108,13 @@
int ocmem_shrink(int client_id, struct ocmem_buf *buf,
unsigned long new_size);
-int ocmem_expand(int client_id, struct ocmem_buf *buf,
- unsigned long new_size);
+/* Transfer APIs */
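+/* Map and unmap queue an asynchronous RDM transfer; the result is
+ * delivered through the client's OCMEM notifier as
+ * OCMEM_MAP_DONE/OCMEM_MAP_FAIL or OCMEM_UNMAP_DONE/OCMEM_UNMAP_FAIL. */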
+int ocmem_map(int client_id, struct ocmem_buf *buffer,
+ struct ocmem_map_list *list);
+
+int ocmem_unmap(int client_id, struct ocmem_buf *buffer,
+ struct ocmem_map_list *list);
/* Priority Enforcement APIs */
int ocmem_evict(int client_id);
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 64c9ffe..70b5a45 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -20,12 +20,13 @@
#include "ocmem.h"
#include <mach/msm_iomap.h>
#include <asm/io.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
#define OCMEM_PHYS_BASE 0xFEC00000
#define OCMEM_PHYS_SIZE 0x180000
-#define TO_OCMEM 0x1
-#define TO_DDR 0x2
+#define TO_OCMEM 0x0
+#define TO_DDR 0x1
struct ocmem_zone;
@@ -38,7 +39,7 @@
int owner;
int active_regions;
int max_regions;
- struct list_head region_list;
+ struct list_head req_list;
unsigned long z_start;
unsigned long z_end;
unsigned long z_head;
@@ -78,12 +79,24 @@
bool interleaved;
};
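+
+/*
+ * Tracks a single in-flight eviction: the victim regions and requests,
+ * the number of shrinks still pending, and a completion that is
+ * signalled once the last victim has been shrunk.
+ */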
+struct ocmem_eviction_data {
+ struct completion completion;
+ struct list_head victim_list;
+ struct list_head req_list;
+ struct work_struct work;
+ int prio;
+	atomic_t pending;
+ bool passive;
+};
+
struct ocmem_req {
struct rw_semaphore rw_sem;
/* Chain in sched queue */
struct list_head sched_list;
/* Chain in zone list */
struct list_head zone_list;
+ /* Chain in eviction list */
+ struct list_head eviction_list;
int owner;
int prio;
uint32_t req_id;
@@ -100,6 +113,7 @@
unsigned long req_start;
unsigned long req_end;
unsigned long req_sz;
+ struct ocmem_eviction_data *edata;
};
struct ocmem_handle {
@@ -150,13 +164,20 @@
int ocmem_notifier_init(void);
int check_notifier(int);
+const char *get_name(int);
+int check_id(int);
int dispatch_notification(int, enum ocmem_notif_type, struct ocmem_buf *);
int ocmem_sched_init(void);
+int ocmem_rdm_init(struct platform_device *);
int process_allocate(int, struct ocmem_handle *, unsigned long, unsigned long,
unsigned long, bool, bool);
int process_free(int, struct ocmem_handle *);
-int process_xfer(int, struct ocmem_handle *, struct ocmem_map_list *,
- int direction);
+int process_xfer(int, struct ocmem_handle *, struct ocmem_map_list *, int);
+int process_evict(int);
+int process_restore(int);
+int process_shrink(int, struct ocmem_handle *, unsigned long);
+int ocmem_rdm_transfer(int, struct ocmem_map_list *,
+ unsigned long, int);
unsigned long process_quota(int);
#endif
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index a233080..b42ac8f 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -55,6 +55,7 @@
static struct ocmem_plat_data *ocmem_pdata;
#define CLIENT_NAME_MAX 10
+
/* Must be in sync with enum ocmem_client */
static const char *client_names[OCMEM_CLIENT_MAX] = {
"graphics",
@@ -64,7 +65,7 @@
"voice",
"lp_audio",
"sensors",
- "blast",
+ "other_os",
};
struct ocmem_quota_table {
@@ -85,7 +86,7 @@
{ "voice", OCMEM_VOICE, 0x0, 0x0, 0x0, 0 },
{ "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0},
{ "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0},
- { "blast", OCMEM_BLAST, 0x120000, 0x20000, 0x20000, 0},
+ { "other_os", OCMEM_OTHER_OS, 0x120000, 0x20000, 0x20000, 0},
{ "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0},
};
@@ -99,6 +100,18 @@
return -EINVAL;
}
+int check_id(int id)
+{
+ return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
+}
+
+const char *get_name(int id)
+{
+ if (!check_id(id))
+ return NULL;
+ return client_names[id];
+}
+
inline unsigned long phys_to_offset(unsigned long addr)
{
if (!ocmem_pdata)
@@ -455,7 +468,7 @@
zone->owner = part->id;
zone->active_regions = 0;
zone->max_regions = 0;
- INIT_LIST_HEAD(&zone->region_list);
+ INIT_LIST_HEAD(&zone->req_list);
zone->z_ops = z_ops;
if (part->p_tail) {
z_ops->allocate = allocate_tail;
@@ -519,6 +532,10 @@
writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
+
+ if (ocmem_rdm_init(pdev))
+ return -EBUSY;
+
dev_dbg(dev, "initialized successfully\n");
return 0;
}
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index bed13de..bb32fca 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -13,10 +13,8 @@
#include <linux/slab.h>
#include <mach/ocmem_priv.h>
-static inline int check_id(int id)
-{
- return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
-}
+static DEFINE_MUTEX(ocmem_eviction_lock);
+static DECLARE_BITMAP(evicted, OCMEM_CLIENT_MAX);
static struct ocmem_handle *generate_handle(void)
{
@@ -61,6 +59,24 @@
return 0;
}
+static int __ocmem_shrink(int id, struct ocmem_buf *buf, unsigned long len)
+{
+ int ret = 0;
+ struct ocmem_handle *handle = buffer_to_handle(buf);
+
+ if (!handle)
+ return -EINVAL;
+
+ mutex_lock(&handle->handle_mutex);
+ ret = process_shrink(id, handle, len);
+ mutex_unlock(&handle->handle_mutex);
+
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
static struct ocmem_buf *__ocmem_allocate_range(int id, unsigned long min,
unsigned long max, unsigned long step, bool block, bool wait)
{
@@ -218,6 +234,15 @@
return __ocmem_free(client_id, buffer);
}
+int ocmem_shrink(int client_id, struct ocmem_buf *buffer, unsigned long len)
+{
+ if (!buffer)
+ return -EINVAL;
+ if (len >= buffer->len)
+ return -EINVAL;
+ return __ocmem_shrink(client_id, buffer, len);
+}
+
int pre_validate_chunk_list(struct ocmem_map_list *list)
{
int i = 0;
@@ -236,8 +261,12 @@
for (i = 0; i < list->num_chunks; i++) {
if (!chunks[i].ddr_paddr ||
- chunks[i].size < MIN_CHUNK_SIZE)
+ chunks[i].size < MIN_CHUNK_SIZE ||
+ !IS_ALIGNED(chunks[i].size, MIN_CHUNK_SIZE)) {
+ pr_err("Invalid ocmem chunk at index %d (p: %lx, size %lx)\n",
+ i, chunks[i].ddr_paddr, chunks[i].size);
return -EINVAL;
+ }
}
return 0;
}
@@ -265,7 +294,7 @@
return -EINVAL;
}
- if (!pre_validate_chunk_list(list))
+ if (pre_validate_chunk_list(list) != 0)
return -EINVAL;
handle = buffer_to_handle(buffer);
@@ -303,14 +332,10 @@
return -EINVAL;
}
- if (!pre_validate_chunk_list(list))
+ if (pre_validate_chunk_list(list) != 0)
return -EINVAL;
handle = buffer_to_handle(buffer);
-
- if (!handle)
- return -EINVAL;
-
mutex_lock(&handle->handle_mutex);
ret = process_xfer(client_id, handle, list, TO_DDR);
mutex_unlock(&handle->handle_mutex);
@@ -325,3 +350,52 @@
}
return process_quota(client_id);
}
+
+/*
+ * Synchronous eviction/restore calls.
+ * Only a single eviction or restoration is allowed at a time, and
+ * evictions/restorations cannot be concurrent with other maps.
+ */
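+/* A client typically brackets a high priority use case with
+ * ocmem_evict(id) ... ocmem_restore(id); the 'evicted' bitmap above
+ * enforces that the two calls are paired. */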
+int ocmem_evict(int client_id)
+{
+ int ret = 0;
+
+ if (!check_id(client_id)) {
+ pr_err("ocmem: Invalid client id: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ocmem_eviction_lock);
+ if (test_bit(client_id, evicted)) {
+ pr_err("ocmem: Previous eviction was not restored by %d\n",
+ client_id);
+ mutex_unlock(&ocmem_eviction_lock);
+ return -EINVAL;
+ }
+
+ ret = process_evict(client_id);
+ if (ret == 0)
+ set_bit(client_id, evicted);
+
+ mutex_unlock(&ocmem_eviction_lock);
+ return ret;
+}
+
+int ocmem_restore(int client_id)
+{
+ int ret = 0;
+
+ if (!check_id(client_id)) {
+ pr_err("ocmem: Invalid client id: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ocmem_eviction_lock);
+ if (!test_bit(client_id, evicted)) {
+ pr_err("ocmem: No previous eviction by %d\n", client_id);
+ mutex_unlock(&ocmem_eviction_lock);
+ return -EINVAL;
+ }
+ ret = process_restore(client_id);
+ clear_bit(client_id, evicted);
+ mutex_unlock(&ocmem_eviction_lock);
+ return ret;
+}
diff --git a/arch/arm/mach-msm/ocmem_notifier.c b/arch/arm/mach-msm/ocmem_notifier.c
index 4754f44..9fbcd73 100644
--- a/arch/arm/mach-msm/ocmem_notifier.c
+++ b/arch/arm/mach-msm/ocmem_notifier.c
@@ -24,11 +24,6 @@
unsigned listeners;
} notifiers[OCMEM_CLIENT_MAX];
-static int check_id(int id)
-{
- return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
-}
-
int check_notifier(int id)
{
int ret = 0;
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
new file mode 100644
index 0000000..6b93d04
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/genalloc.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <mach/ocmem_priv.h>
+
+#define RDM_MAX_ENTRIES 32
+#define RDM_MAX_CLIENTS 2
+
+/* Data Mover Parameters */
+#define DM_BLOCK_128 0x0
+#define DM_BLOCK_256 0x1
+#define DM_BR_ID_LPASS 0x0
+#define DM_BR_ID_GPS 0x1
+
+#define DM_INTR_CLR (0x8)
+#define DM_INTR_MASK (0xC)
+#define DM_GEN_STATUS (0x10)
+#define DM_STATUS (0x14)
+#define DM_CTRL (0x1000)
+#define DM_TBL_BASE (0x1010)
+#define DM_TBL_IDX(x) ((x) * 0x18)
+#define DM_TBL_n(x) (DM_TBL_BASE + (DM_TBL_IDX(x)))
+#define DM_TBL_n_offset(x) DM_TBL_n(x)
+#define DM_TBL_n_size(x) (DM_TBL_n(x)+0x4)
+#define DM_TBL_n_paddr(x) (DM_TBL_n(x)+0x8)
+#define DM_TBL_n_ctrl(x) (DM_TBL_n(x)+0x10)
+
+#define BR_CTRL (0x0)
+#define BR_CLIENT_BASE (0x4)
+#define BR_CLIENT_n_IDX(x) ((x) * 0x4)
+#define BR_CLIENT_n_ctrl(x) (BR_CLIENT_BASE + (BR_CLIENT_n_IDX(x)))
+#define BR_STATUS (0x14)
+/* 16 entries per client are supported */
+/* Use entries 0 - 15 for client0 */
+#define BR_CLIENT0_MASK (0x1000)
+/* Use entries 16 - 31 for client1 */
+#define BR_CLIENT1_MASK (0x2010)
+
+#define BR_TBL_BASE (0x40)
+#define BR_TBL_IDX(x) ((x) * 0x18)
+#define BR_TBL_n(x) (BR_TBL_BASE + (BR_TBL_IDX(x)))
+#define BR_TBL_n_offset(x) BR_TBL_n(x)
+#define BR_TBL_n_size(x) (BR_TBL_n(x)+0x4)
+#define BR_TBL_n_paddr(x) (BR_TBL_n(x)+0x8)
+#define BR_TBL_n_ctrl(x) (BR_TBL_n(x)+0x10)
+
+/* Constants and Shifts */
+#define BR_TBL_ENTRY_ENABLE 0x1
+#define BR_TBL_START 0x0
+#define BR_TBL_END 0x8
+#define BR_RW_SHIFT 0x2
+
+#define DM_TBL_START 0x10
+#define DM_TBL_END 0x18
+#define DM_CLIENT_SHIFT 0x8
+#define DM_BR_ID_SHIFT 0x4
+#define DM_BR_BLK_SHIFT 0x1
+#define DM_DIR_SHIFT 0x0
+
+#define DM_DONE 0x1
+#define DM_INTR_ENABLE 0x0
+#define DM_INTR_DISABLE 0x1
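+
+/*
+ * Illustrative example: moving table entries 0..3 into OCMEM through
+ * the LPASS BR programs DM_CTRL with
+ *	(0 << DM_TBL_START) | (3 << DM_TBL_END) |
+ *	(DM_BR_ID_LPASS << DM_BR_ID_SHIFT) |
+ *	(DM_BLOCK_256 << DM_BR_BLK_SHIFT) | (TO_OCMEM << DM_DIR_SHIFT)
+ * mirroring what ocmem_rdm_transfer() computes below.
+ */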
+
+static void *br_base;
+static void *dm_base;
+
+static atomic_t dm_pending;
+static wait_queue_head_t dm_wq;
+/* Shadow tables for debug purposes */
+struct ocmem_br_table {
+ unsigned int offset;
+ unsigned int size;
+ unsigned int ddr_low;
+ unsigned int ddr_high;
+ unsigned int ctrl;
+} br_table[RDM_MAX_ENTRIES];
+
+/* DM Table replicates an entire BR table */
+/* Note: There is more than one BR in the system */
+struct ocmem_dm_table {
+ unsigned int offset;
+ unsigned int size;
+ unsigned int ddr_low;
+ unsigned int ddr_high;
+ unsigned int ctrl;
+} dm_table[RDM_MAX_ENTRIES];
+
+/* Wrapper that will shadow these values later */
+static int ocmem_read(void *at)
+{
+ return readl_relaxed(at);
+}
+
+/* Wrapper that will shadow these values later */
+static int ocmem_write(unsigned long val, void *at)
+{
+ writel_relaxed(val, at);
+ return 0;
+}
+
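+/* The RDM path serves two clients: OCMEM_SENSORS is routed through
+ * BR/DM client 1 (GPS), all other low power clients through
+ * client 0 (LPASS). */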
+static inline int client_ctrl_id(int id)
+{
+ return (id == OCMEM_SENSORS) ? 1 : 0;
+}
+
+static inline int client_slot_start(int id)
+{
+ return client_ctrl_id(id) * 16;
+}
+
+static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
+{
+ atomic_set(&dm_pending, 0);
+ ocmem_write(DM_INTR_DISABLE, dm_base + DM_INTR_CLR);
+	wake_up(&dm_wq);
+ return IRQ_HANDLED;
+}
+
+/* Lock during transfers */
+int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
+ unsigned long start, int direction)
+{
+ int num_chunks = clist->num_chunks;
+ int slot = client_slot_start(id);
+ int table_start = 0;
+ int table_end = 0;
+ int br_ctrl = 0;
+ int br_id = 0;
+ int dm_ctrl = 0;
+ int i = 0;
+ int j = 0;
+ int status = 0;
+
+ for (i = 0, j = slot; i < num_chunks; i++, j++) {
+
+ struct ocmem_chunk *chunk = &clist->chunks[i];
+ int sz = chunk->size;
+ int paddr = chunk->ddr_paddr;
+ int tbl_n_ctrl = 0;
+
+ tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
+ if (chunk->ro)
+ tbl_n_ctrl |= (1 << BR_RW_SHIFT);
+
+ /* Table Entry n of BR and DM */
+ ocmem_write(start, br_base + BR_TBL_n_offset(j));
+ ocmem_write(sz, br_base + BR_TBL_n_size(j));
+ ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
+ ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));
+
+ ocmem_write(start, dm_base + DM_TBL_n_offset(j));
+ ocmem_write(sz, dm_base + DM_TBL_n_size(j));
+ ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
+ ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));
+
+ start += sz;
+ }
+
+ br_id = client_ctrl_id(id);
+ table_start = slot;
+ table_end = slot + num_chunks - 1;
+ br_ctrl |= (table_start << BR_TBL_START);
+ br_ctrl |= (table_end << BR_TBL_END);
+
+ ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
+ /* Enable BR */
+ ocmem_write(0x1, br_base + BR_CTRL);
+
+ /* Compute DM Control Value */
+ dm_ctrl |= (table_start << DM_TBL_START);
+ dm_ctrl |= (table_end << DM_TBL_END);
+
+	dm_ctrl |= (br_id << DM_BR_ID_SHIFT);
+ dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
+ dm_ctrl |= (direction << DM_DIR_SHIFT);
+
+ status = ocmem_read(dm_base + DM_STATUS);
+ pr_debug("Transfer status before %x\n", status);
+ atomic_set(&dm_pending, 1);
+ /* Trigger DM */
+ ocmem_write(dm_ctrl, dm_base + DM_CTRL);
+ pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);
+
+	/* The transfer cannot be abandoned mid-flight, so do not allow
+	 * signals to interrupt the wait */
+	wait_event(dm_wq, atomic_read(&dm_pending) == 0);
+
+ return 0;
+}
+
+int ocmem_rdm_init(struct platform_device *pdev)
+{
+	struct ocmem_plat_data *pdata = NULL;
+	int rc = 0;
+
+	pdata = platform_get_drvdata(pdev);
+
+	br_base = pdata->br_base;
+	dm_base = pdata->dm_base;
+
+	/* The wait queue must be initialized before the IRQ is
+	 * requested, since the handler wakes it up */
+	init_waitqueue_head(&dm_wq);
+
+	rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler,
+				IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata);
+
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to request dm irq\n");
+		return rc;
+	}
+
+	/* enable dm interrupts */
+	ocmem_write(DM_INTR_ENABLE, dm_base + DM_INTR_MASK);
+	return 0;
+}
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index 10a267c..f6d066d 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -54,7 +54,7 @@
MIN_PRIO = 0x0,
NO_PRIO = MIN_PRIO,
PRIO_SENSORS = 0x1,
- PRIO_BLAST = 0x1,
+ PRIO_OTHER_OS = 0x1,
PRIO_LP_AUDIO = 0x1,
PRIO_HP_AUDIO = 0x2,
PRIO_VOICE = 0x3,
@@ -75,6 +75,21 @@
*/
#define SCHED_DELAY 10
+static struct list_head rdm_queue;
+static struct mutex rdm_mutex;
+static struct workqueue_struct *ocmem_rdm_wq;
+static struct workqueue_struct *ocmem_eviction_wq;
+
+static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
+
+struct ocmem_rdm_work {
+ int id;
+ struct ocmem_map_list *list;
+ struct ocmem_handle *handle;
+ int direction;
+ struct work_struct work;
+};
+
/* OCMEM Operational modes */
enum ocmem_client_modes {
OCMEM_PERFORMANCE = 1,
@@ -107,7 +122,7 @@
{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
- {OCMEM_BLAST, PRIO_BLAST, OCMEM_LOW_POWER, OCMEM_SYSNOC},
+ {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
};
static struct rb_root sched_tree;
@@ -119,6 +134,8 @@
struct rb_node region_rb;
/* Hash map of requests */
struct idr region_idr;
+ /* Chain in eviction list */
+ struct list_head eviction_list;
unsigned long r_start;
unsigned long r_end;
unsigned long r_sz;
@@ -244,6 +261,7 @@
if (!p)
return NULL;
idr_init(&p->region_idr);
+ INIT_LIST_HEAD(&p->eviction_list);
p->r_start = p->r_end = p->r_sz = 0x0;
p->max_prio = NO_PRIO;
return p;
@@ -461,10 +479,14 @@
{
int rc = 0;
+ down_write(&req->rw_sem);
+
mutex_lock(&sched_mutex);
rc = __sched_map(req);
mutex_unlock(&sched_mutex);
+ up_write(&req->rw_sem);
+
if (rc == OP_FAIL)
return -EINVAL;
@@ -475,10 +497,14 @@
{
int rc = 0;
+ down_write(&req->rw_sem);
+
mutex_lock(&sched_mutex);
rc = __sched_unmap(req);
mutex_unlock(&sched_mutex);
+ up_write(&req->rw_sem);
+
if (rc == OP_FAIL)
return -EINVAL;
@@ -713,6 +739,104 @@
}
/* Must be called with sched_mutex held */
+static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
+{
+ int owner = req->owner;
+ int ret = 0;
+
+ struct ocmem_req *matched_req = NULL;
+ struct ocmem_region *matched_region = NULL;
+ struct ocmem_region *region = NULL;
+	long alloc_addr = 0x0;
+
+ struct ocmem_zone *zone = get_zone(owner);
+
+ BUG_ON(!zone);
+
+ /* The shrink should not be called for zero size */
+ BUG_ON(new_sz == 0);
+
+ matched_region = find_region_match(req->req_start, req->req_end);
+ matched_req = find_req_match(req->req_id, matched_region);
+
+ if (!matched_region || !matched_req)
+ goto invalid_op_error;
+ if (matched_req != req)
+ goto invalid_op_error;
+
+
+ ret = zone->z_ops->free(zone,
+ matched_req->req_start, matched_req->req_sz);
+
+ if (ret < 0) {
+ pr_err("Zone Allocation operation failed\n");
+ goto internal_error;
+ }
+
+ alloc_addr = zone->z_ops->allocate(zone, new_sz);
+
+ if (alloc_addr < 0) {
+ pr_err("Zone Allocation operation failed\n");
+ goto internal_error;
+ }
+
+ /* Detach the region from the interval tree */
+ /* This is to guarantee that the change in size
+ * causes the tree to be rebalanced if required */
+
+ detach_req(matched_region, req);
+ if (req_count(matched_region) == 0) {
+ remove_region(matched_region);
+ region = matched_region;
+ } else {
+ region = create_region();
+ if (!region) {
+ pr_err("ocmem: Unable to create region\n");
+ goto internal_error;
+ }
+ }
+ /* update the request */
+ req->req_start = alloc_addr;
+ req->req_sz = new_sz;
+ req->req_end = alloc_addr + req->req_sz;
+
+ /* update request state */
+ SET_STATE(req, R_MUST_GROW);
+ SET_STATE(req, R_MUST_MAP);
+ req->op = SCHED_MAP;
+
+ /* attach the request to the region */
+ attach_req(region, req);
+ populate_region(region, req);
+ update_region_prio(region);
+
+ /* update the tree with new region */
+ if (insert_region(region)) {
+ pr_err("ocmem: Failed to insert the region\n");
+ zone->z_ops->free(zone, alloc_addr, new_sz);
+ detach_req(region, req);
+ update_region_prio(region);
+ /* req will be destroyed by the caller */
+ goto region_error;
+ }
+ return OP_COMPLETE;
+
+region_error:
+ destroy_region(region);
+internal_error:
+ pr_err("ocmem: shrink: Failed\n");
+ return OP_FAIL;
+invalid_op_error:
+ pr_err("ocmem: shrink: Failed to find matching region\n");
+ return OP_FAIL;
+}
+
+/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
bool can_wait)
{
@@ -906,12 +1030,6 @@
return req;
}
-int process_xfer(int id, struct ocmem_handle *handle,
- struct ocmem_map_list *list, int direction)
-{
-
- return 0;
-}
unsigned long process_quota(int id)
{
@@ -989,6 +1107,35 @@
return 0;
}
+static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
+{
+
+ int rc = 0;
+ struct ocmem_buf *buffer = NULL;
+
+ down_write(&req->rw_sem);
+ buffer = req->buffer;
+
+ /* Take the scheduler mutex */
+ mutex_lock(&sched_mutex);
+ rc = __sched_shrink(req, shrink_size);
+ mutex_unlock(&sched_mutex);
+
+ if (rc == OP_FAIL)
+ goto err_op_fail;
+
+ else if (rc == OP_COMPLETE) {
+ buffer->addr = device_address(req->owner, req->req_start);
+ buffer->len = req->req_sz;
+ }
+
+ up_write(&req->rw_sem);
+ return 0;
+err_op_fail:
+ up_write(&req->rw_sem);
+ return -EINVAL;
+}
+
static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
@@ -1076,6 +1223,250 @@
return 0;
}
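+
+/*
+ * RDM transfers run from a worker so that map/unmap callers do not
+ * block on the data mover; the outcome reaches the client as an
+ * OCMEM_MAP_DONE/FAIL or OCMEM_UNMAP_DONE/FAIL notification.
+ */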
+static void ocmem_rdm_worker(struct work_struct *work)
+{
+ int offset = 0;
+ int rc = 0;
+ int event;
+ struct ocmem_rdm_work *work_data = container_of(work,
+ struct ocmem_rdm_work, work);
+ int id = work_data->id;
+ struct ocmem_map_list *list = work_data->list;
+ int direction = work_data->direction;
+ struct ocmem_handle *handle = work_data->handle;
+ struct ocmem_req *req = handle_to_req(handle);
+ struct ocmem_buf *buffer = handle_to_buffer(handle);
+
+ down_write(&req->rw_sem);
+ offset = phys_to_offset(req->req_start);
+ rc = ocmem_rdm_transfer(id, list, offset, direction);
+ if (work_data->direction == TO_OCMEM)
+ event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
+ else
+ event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
+
+ up_write(&req->rw_sem);
+ kfree(work_data);
+ dispatch_notification(id, event, buffer);
+}
+
+int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
+ struct ocmem_map_list *list, int direction)
+{
+ struct ocmem_rdm_work *work_data = NULL;
+
+ down_write(&req->rw_sem);
+
+	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
+	if (!work_data) {
+		up_write(&req->rw_sem);
+		return -ENOMEM;
+	}
+
+ work_data->handle = handle;
+ work_data->list = list;
+ work_data->id = req->owner;
+ work_data->direction = direction;
+ INIT_WORK(&work_data->work, ocmem_rdm_worker);
+ up_write(&req->rw_sem);
+ queue_work(ocmem_rdm_wq, &work_data->work);
+ return 0;
+}
+
+int process_xfer_out(int id, struct ocmem_handle *handle,
+ struct ocmem_map_list *list)
+{
+ struct ocmem_req *req = NULL;
+ int rc = 0;
+
+ req = handle_to_req(handle);
+
+ if (!req)
+ return -EINVAL;
+
+ if (!is_mapped(req)) {
+ pr_err("Buffer is not already mapped\n");
+ goto transfer_out_error;
+ }
+
+ rc = process_unmap(req, req->req_start, req->req_end);
+ if (rc < 0) {
+ pr_err("Unmapping the buffer failed\n");
+ goto transfer_out_error;
+ }
+
+ rc = queue_transfer(req, handle, list, TO_DDR);
+
+ if (rc < 0) {
+ pr_err("Failed to queue rdm transfer to DDR\n");
+ goto transfer_out_error;
+ }
+
+ return 0;
+
+transfer_out_error:
+ return -EINVAL;
+}
+
+int process_xfer_in(int id, struct ocmem_handle *handle,
+ struct ocmem_map_list *list)
+{
+ struct ocmem_req *req = NULL;
+ int rc = 0;
+
+ req = handle_to_req(handle);
+
+ if (!req)
+ return -EINVAL;
+
+ if (is_mapped(req)) {
+ pr_err("Buffer is already mapped\n");
+ goto transfer_in_error;
+ }
+
+ rc = process_map(req, req->req_start, req->req_end);
+ if (rc < 0) {
+ pr_err("Mapping the buffer failed\n");
+ goto transfer_in_error;
+ }
+
+ rc = queue_transfer(req, handle, list, TO_OCMEM);
+
+ if (rc < 0) {
+ pr_err("Failed to queue rdm transfer to OCMEM\n");
+ goto transfer_in_error;
+ }
+
+ return 0;
+transfer_in_error:
+ return -EINVAL;
+}
+
+int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
+{
+ struct ocmem_req *req = NULL;
+ struct ocmem_buf *buffer = NULL;
+ struct ocmem_eviction_data *edata = NULL;
+ int rc = 0;
+
+ if (is_blocked(id)) {
+ pr_err("Client %d cannot request free\n", id);
+ return -EINVAL;
+ }
+
+ req = handle_to_req(handle);
+ buffer = handle_to_buffer(handle);
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->req_start != core_address(id, buffer->addr)) {
+ pr_err("Invalid buffer handle passed for shrink\n");
+ return -EINVAL;
+ }
+
+	edata = req->edata;
+
+	if (!edata) {
+		pr_err("ocmem: No eviction data found for shrink request\n");
+		return -EINVAL;
+	}
+
+ if (is_tcm(req->owner))
+ do_unmap(req);
+
+ if (size == 0) {
+ pr_info("req %p being shrunk to zero\n", req);
+ rc = do_free(req);
+ if (rc < 0)
+ return -EINVAL;
+ } else {
+ rc = do_shrink(req, size);
+ if (rc < 0)
+ return -EINVAL;
+ }
+
+	if (atomic_dec_and_test(&edata->pending)) {
+		pr_debug("All regions evicted\n");
+		complete(&edata->completion);
+	}
+
+ return 0;
+}
+
+int process_xfer(int id, struct ocmem_handle *handle,
+ struct ocmem_map_list *list, int direction)
+{
+ int rc = 0;
+
+ if (is_tcm(id)) {
+ WARN(1, "Mapping operation is invalid for client\n");
+ return -EINVAL;
+ }
+
+ if (direction == TO_DDR)
+ rc = process_xfer_out(id, handle, list);
+ else if (direction == TO_OCMEM)
+ rc = process_xfer_in(id, handle, list);
+ return rc;
+}
+
+int ocmem_eviction_thread(struct work_struct *work)
+{
+ return 0;
+}
+
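+/*
+ * Eviction handshake: every victim request is notified with
+ * OCMEM_ALLOC_SHRINK and its owner is expected to respond by calling
+ * ocmem_shrink(), which lands in process_shrink() above and drops
+ * edata->pending; process_evict() waits until all victims are done.
+ */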
+int process_evict(int id)
+{
+ struct ocmem_eviction_data *edata = NULL;
+ int prio = ocmem_client_table[id].priority;
+ struct rb_node *rb_node = NULL;
+ struct ocmem_req *req = NULL;
+ struct ocmem_buf buffer;
+ int j = 0;
+
+	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
+	if (!edata)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&edata->victim_list);
+	INIT_LIST_HEAD(&edata->req_list);
+	/* Initialize before any victim can complete() against it */
+	init_completion(&edata->completion);
+	edata->prio = prio;
+	atomic_set(&edata->pending, 0);
+	edata->passive = true;
+	evictions[id] = edata;
+
+ mutex_lock(&sched_mutex);
+
+ for (rb_node = rb_first(&sched_tree); rb_node;
+ rb_node = rb_next(rb_node)) {
+ struct ocmem_region *tmp_region = NULL;
+ tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
+ if (tmp_region->max_prio < prio) {
+ for (j = id - 1; j > NO_PRIO; j--) {
+ req = find_req_match(j, tmp_region);
+ if (req) {
+ pr_info("adding %p to eviction list\n",
+ tmp_region);
+ list_add_tail(
+ &tmp_region->eviction_list,
+ &edata->victim_list);
+ list_add_tail(
+ &req->eviction_list,
+ &edata->req_list);
+				atomic_inc(&edata->pending);
+ req->edata = edata;
+ buffer.addr = req->req_start;
+ buffer.len = 0x0;
+ dispatch_notification(req->owner,
+ OCMEM_ALLOC_SHRINK, &buffer);
+ }
+ }
+ } else {
+ pr_info("skipping %p from eviction\n", tmp_region);
+ }
+ }
+ mutex_unlock(&sched_mutex);
+ pr_debug("Waiting for all regions to be shrunk\n");
+	if (atomic_read(&edata->pending) > 0)
+		wait_for_completion(&edata->completion);
+ return 0;
+}
+
static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
int rc = 0;
@@ -1113,6 +1504,31 @@
return -EINVAL;
}
+int process_restore(int id)
+{
+ struct ocmem_req *req = NULL;
+ struct ocmem_req *next = NULL;
+ struct ocmem_eviction_data *edata = evictions[id];
+
+ if (!edata)
+ return 0;
+
+	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list) {
+		pr_debug("ocmem: Fetched evicted request %p\n", req);
+		list_del(&req->eviction_list);
+		list_del(&req->sched_list);
+		req->op = SCHED_ALLOCATE;
+		sched_enqueue(req);
+	}
+	kfree(edata);
+	evictions[id] = NULL;
+	pr_debug("Restored all evicted regions\n");
+ ocmem_schedule_pending();
+ return 0;
+}
int process_allocate(int id, struct ocmem_handle *handle,
unsigned long min, unsigned long max,
@@ -1251,5 +1668,13 @@
for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
INIT_LIST_HEAD(&sched_queue[i]);
+ mutex_init(&rdm_mutex);
+ INIT_LIST_HEAD(&rdm_queue);
+ ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
+ if (!ocmem_rdm_wq)
+ return -ENOMEM;
+ ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
+ if (!ocmem_eviction_wq)
+ return -ENOMEM;
return 0;
}