/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <mach/ocmem_priv.h>

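/*
 * Register layout notes, derived from the offsets below: the driver
 * programs two register banks, one reached through br_base (BR) and
 * one through dm_base (DM, the data mover).  Each bank carries a
 * table of 32 mapping entries spaced 0x18 bytes apart; every entry
 * holds an OCMEM offset, a size, a DDR physical address and a
 * control word.  The entries are split evenly between the two
 * supported clients, 16 per client.
 */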
#define RDM_MAX_ENTRIES 32
#define RDM_MAX_CLIENTS 2

/* Data Mover Parameters */
#define DM_BLOCK_128 0x0
#define DM_BLOCK_256 0x1
#define DM_BR_ID_LPASS 0x0
#define DM_BR_ID_GPS 0x1

#define DM_INTR_CLR (0x8)
#define DM_INTR_MASK (0xC)
#define DM_INT_STATUS (0x10)
#define DM_GEN_STATUS (0x14)
#define DM_CLR_OFFSET (0x18)
#define DM_CLR_SIZE (0x1C)
#define DM_CLR_PATTERN (0x20)
#define DM_CLR_TRIGGER (0x24)
#define DM_CTRL (0x1000)
#define DM_TBL_BASE (0x1010)
#define DM_TBL_IDX(x) ((x) * 0x18)
#define DM_TBL_n(x) (DM_TBL_BASE + (DM_TBL_IDX(x)))
#define DM_TBL_n_offset(x) DM_TBL_n(x)
#define DM_TBL_n_size(x) (DM_TBL_n(x)+0x4)
#define DM_TBL_n_paddr(x) (DM_TBL_n(x)+0x8)
#define DM_TBL_n_ctrl(x) (DM_TBL_n(x)+0x10)

#define BR_CTRL (0x0)
#define BR_CLIENT_BASE (0x4)
#define BR_CLIENT_n_IDX(x) ((x) * 0x4)
#define BR_CLIENT_n_ctrl(x) (BR_CLIENT_BASE + (BR_CLIENT_n_IDX(x)))
#define BR_STATUS (0x14)
#define BR_LAST_ADDR (0x18)
/* 16 entries per client are supported */
/* Use entries 0 - 15 for client0 */
#define BR_CLIENT0_MASK (0x1000)
/* Use entries 16 - 31 for client1 */
#define BR_CLIENT1_MASK (0x2010)

#define BR_TBL_BASE (0x40)
#define BR_TBL_IDX(x) ((x) * 0x18)
#define BR_TBL_n(x) (BR_TBL_BASE + (BR_TBL_IDX(x)))
#define BR_TBL_n_offset(x) BR_TBL_n(x)
#define BR_TBL_n_size(x) (BR_TBL_n(x)+0x4)
#define BR_TBL_n_paddr(x) (BR_TBL_n(x)+0x8)
#define BR_TBL_n_ctrl(x) (BR_TBL_n(x)+0x10)

/* Constants and Shifts */
#define BR_TBL_ENTRY_ENABLE 0x1
#define BR_TBL_START 0x0
#define BR_TBL_END 0x8
#define BR_RW_SHIFT 0x1

#define DM_TBL_START 0x10
#define DM_TBL_END 0x18
#define DM_CLIENT_SHIFT 0x8
#define DM_BR_ID_SHIFT 0x4
#define DM_BR_BLK_SHIFT 0x1
#define DM_DIR_SHIFT 0x0

#define DM_DONE 0x1
#define DM_MASK_RESET 0x0
#define DM_INTR_RESET 0x20003
#define DM_CLR_ENABLE 0x1

static void *br_base;
static void *dm_base;

struct completion dm_clear_event;
struct completion dm_transfer_event;
/* Shadow tables for debug purposes */
struct ocmem_br_table {
	unsigned int offset;
	unsigned int size;
	unsigned int ddr_low;
	unsigned int ddr_high;
	unsigned int ctrl;
} br_table[RDM_MAX_ENTRIES];

/* The DM table replicates an entire BR table */
/* Note: there is more than one BR in the system */
struct ocmem_dm_table {
	unsigned int offset;
	unsigned int size;
	unsigned int ddr_low;
	unsigned int ddr_high;
	unsigned int ctrl;
} dm_table[RDM_MAX_ENTRIES];

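/*
 * Client bookkeeping: only two RDM clients are supported.  The
 * sensors client (OCMEM_SENSORS) is assigned control id 1 and table
 * entries 16-31; every other client maps to control id 0 and
 * entries 0-15.
 */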
static inline int client_ctrl_id(int id)
{
	return (id == OCMEM_SENSORS) ? 1 : 0;
}

static inline int client_slot_start(int id)
{
	return client_ctrl_id(id) * 16;
}

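/*
 * DM interrupt handler: BIT(0) of DM_INT_STATUS flags completion of
 * a data mover transfer, BIT(1) completion of a data clear
 * operation.  Each source is acknowledged by writing the same bit to
 * DM_INTR_CLR before waking the corresponding waiter.
 */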
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
{
	unsigned status;
	unsigned irq_status;

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	irq_status = ocmem_read(dm_base + DM_INT_STATUS);
	pr_debug("irq:dm_status %x irq_status %x\n", status, irq_status);
	if (irq_status & BIT(0)) {
		pr_debug("Data mover completed\n");
		ocmem_write(BIT(0), dm_base + DM_INTR_CLR);
		pr_debug("Last re-mapped address block %x\n",
				ocmem_read(br_base + BR_LAST_ADDR));
		complete(&dm_transfer_event);
	} else if (irq_status & BIT(1)) {
		pr_debug("Data clear engine completed\n");
		ocmem_write(BIT(1), dm_base + DM_INTR_CLR);
		complete(&dm_clear_event);
	} else {
		BUG_ON(1);
	}
	return IRQ_HANDLED;
}

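/*
 * ocmem_clear() fills [start, start + size) of OCMEM with the
 * pattern 0x4D4D434F ("OCMM" in little-endian ASCII) using the data
 * clear engine and blocks until the clear-done interrupt fires.
 * When CONFIG_MSM_OCMEM_NONSECURE is not set the function is a stub,
 * presumably because clearing is handled by the secure environment
 * instead.
 */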
#ifdef CONFIG_MSM_OCMEM_NONSECURE
int ocmem_clear(unsigned long start, unsigned long size)
{
	INIT_COMPLETION(dm_clear_event);
	/* Clear DM Mask */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	/* Clear DM Interrupts */
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
	/* DM CLR offset */
	ocmem_write(start, dm_base + DM_CLR_OFFSET);
	/* DM CLR size */
	ocmem_write(size, dm_base + DM_CLR_SIZE);
	/* Wipe out memory with the pattern "OCMM" */
	ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
	/* The offset, size and pattern for clearing must be set
	 * before triggering the clearing engine
	 */
	mb();
	/* Trigger Data Clear */
	ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);

	wait_for_completion(&dm_clear_event);

	return 0;
}
#else
int ocmem_clear(unsigned long start, unsigned long size)
{
	return 0;
}
#endif

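/*
 * ocmem_rdm_transfer() moves a client's chunk list between DDR and
 * OCMEM.  For each chunk it programs an identical entry into the BR
 * and DM tables within the client's slot range, enables the BR,
 * builds the DM control word (table start/end, client id, BR id,
 * block size and direction) and triggers the data mover, then sleeps
 * until the transfer-done interrupt completes dm_transfer_event.
 * The core clock is held for the duration of the transfer.
 */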
/* Lock during transfers */
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
			unsigned long start, int direction)
{
	int num_chunks = clist->num_chunks;
	int slot = client_slot_start(id);
	int table_start = 0;
	int table_end = 0;
	int br_ctrl = 0;
	int br_id = 0;
	int client_id = 0;
	int dm_ctrl = 0;
	int i = 0;
	int j = 0;
	int status = 0;
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0) {
		pr_err("RDM transfer failed for client %s (id: %d)\n",
				get_name(id), id);
		return rc;
	}

	/* Clear DM Mask */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	/* Clear DM Interrupts */
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);

	for (i = 0, j = slot; i < num_chunks; i++, j++) {
		struct ocmem_chunk *chunk = &clist->chunks[i];
		int sz = chunk->size;
		int paddr = chunk->ddr_paddr;
		int tbl_n_ctrl = 0;

		tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
		if (chunk->ro)
			tbl_n_ctrl |= (1 << BR_RW_SHIFT);

		/* Table Entry n of BR and DM */
		ocmem_write(start, br_base + BR_TBL_n_offset(j));
		ocmem_write(sz, br_base + BR_TBL_n_size(j));
		ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));

		ocmem_write(start, dm_base + DM_TBL_n_offset(j));
		ocmem_write(sz, dm_base + DM_TBL_n_size(j));
		ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));

		start += sz;
	}

	br_id = client_ctrl_id(id);
	table_start = slot;
	table_end = slot + num_chunks - 1;
	br_ctrl |= (table_start << BR_TBL_START);
	br_ctrl |= (table_end << BR_TBL_END);

	ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
	/* Enable BR */
	ocmem_write(0x1, br_base + BR_CTRL);

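	/*
	 * DM control word layout, as implied by the shift constants:
	 * bit 0 selects the direction, bit 1 the block size, the
	 * field at bit 4 the BR id, the field at bit 8 the client
	 * id, and the fields at bits 16 and 24 hold the first and
	 * last table entries to process.
	 */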
	/* Compute DM Control Value */
	dm_ctrl |= (table_start << DM_TBL_START);
	dm_ctrl |= (table_end << DM_TBL_END);

	client_id = client_ctrl_id(id);
	dm_ctrl |= (client_id << DM_CLIENT_SHIFT);
	dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
	dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
	dm_ctrl |= (direction << DM_DIR_SHIFT);

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	pr_debug("Transfer status before %x\n", status);
	INIT_COMPLETION(dm_transfer_event);
	/* The DM and BR tables must be programmed before triggering the
	 * Data Mover else the coherent transfer would be corrupted
	 */
	mb();
	/* Trigger DM */
	ocmem_write(dm_ctrl, dm_base + DM_CTRL);
	pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);

	wait_for_completion(&dm_transfer_event);
	pr_debug("Completed transferring %d segments\n", num_chunks);
	ocmem_disable_core_clock();
	return 0;
}

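/*
 * ocmem_rdm_init() picks up the BR and DM register bases from the
 * platform data, requests the data mover interrupt and initializes
 * the two completion objects.  The core clock is enabled and then
 * released once; a failure to enable it fails the init.
 */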
int ocmem_rdm_init(struct platform_device *pdev)
{
	struct ocmem_plat_data *pdata = NULL;
	int rc = 0;

	pdata = platform_get_drvdata(pdev);

	br_base = pdata->br_base;
	dm_base = pdata->dm_base;

	rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler,
			IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata);

	if (rc) {
		dev_err(&pdev->dev, "Failed to request dm irq\n");
		return -EINVAL;
	}

	rc = ocmem_enable_core_clock();

	if (rc < 0) {
		pr_err("RDM initialization failed\n");
		return rc;
	}

	init_completion(&dm_clear_event);
	init_completion(&dm_transfer_event);
	ocmem_disable_core_clock();
	return 0;
}