/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <mach/ocmem_priv.h>

#define RDM_MAX_ENTRIES 32
#define RDM_MAX_CLIENTS 2

/* Data Mover Parameters */
#define DM_BLOCK_128 0x0
#define DM_BLOCK_256 0x1
#define DM_BR_ID_LPASS 0x0
#define DM_BR_ID_GPS 0x1

#define DM_INTR_CLR (0x8)
#define DM_INTR_MASK (0xC)
#define DM_INT_STATUS (0x10)
#define DM_GEN_STATUS (0x14)
#define DM_CLR_OFFSET (0x18)
#define DM_CLR_SIZE (0x1C)
#define DM_CLR_PATTERN (0x20)
#define DM_CLR_TRIGGER (0x24)
#define DM_CTRL (0x1000)
#define DM_TBL_BASE (0x1010)
#define DM_TBL_IDX(x) ((x) * 0x18)
#define DM_TBL_n(x) (DM_TBL_BASE + (DM_TBL_IDX(x)))
#define DM_TBL_n_offset(x) DM_TBL_n(x)
#define DM_TBL_n_size(x) (DM_TBL_n(x)+0x4)
#define DM_TBL_n_paddr(x) (DM_TBL_n(x)+0x8)
#define DM_TBL_n_ctrl(x) (DM_TBL_n(x)+0x10)
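
/*
 * Both the DM and BR mapping tables use the same per-entry layout: each
 * entry occupies 0x18 bytes, with the OCMEM offset at +0x0, the transfer
 * size at +0x4, the DDR physical address at +0x8 and the entry control
 * word at +0x10 (see the _offset, _size, _paddr and _ctrl helpers).
 */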

#define BR_CTRL (0x0)
#define BR_CLIENT_BASE (0x4)
#define BR_CLIENT_n_IDX(x) ((x) * 0x4)
#define BR_CLIENT_n_ctrl(x) (BR_CLIENT_BASE + (BR_CLIENT_n_IDX(x)))
#define BR_STATUS (0x14)
#define BR_LAST_ADDR (0x18)
/* 16 entries per client are supported */
/* Use entries 0 - 15 for client0 */
#define BR_CLIENT0_MASK (0x1000)
/* Use entries 16 - 31 for client1 */
#define BR_CLIENT1_MASK (0x2010)

#define BR_TBL_BASE (0x40)
#define BR_TBL_IDX(x) ((x) * 0x18)
#define BR_TBL_n(x) (BR_TBL_BASE + (BR_TBL_IDX(x)))
#define BR_TBL_n_offset(x) BR_TBL_n(x)
#define BR_TBL_n_size(x) (BR_TBL_n(x)+0x4)
#define BR_TBL_n_paddr(x) (BR_TBL_n(x)+0x8)
#define BR_TBL_n_ctrl(x) (BR_TBL_n(x)+0x10)

/* Constants and Shifts */
#define BR_TBL_ENTRY_ENABLE 0x1
#define BR_TBL_START 0x0
#define BR_TBL_END 0x8
#define BR_RW_SHIFT 0x1

#define DM_TBL_START 0x10
#define DM_TBL_END 0x18
#define DM_CLIENT_SHIFT 0x8
#define DM_BR_ID_SHIFT 0x4
#define DM_BR_BLK_SHIFT 0x1
#define DM_DIR_SHIFT 0x0

#define DM_DONE 0x1
#define DM_MASK_RESET 0x0
#define DM_INTR_RESET 0x20003
#define DM_CLR_ENABLE 0x1

static void *br_base;
static void *dm_base;

struct completion dm_clear_event;
struct completion dm_transfer_event;

/* Shadow tables for debug purposes */
struct ocmem_br_table {
        unsigned int offset;
        unsigned int size;
        unsigned int ddr_low;
        unsigned int ddr_high;
        unsigned int ctrl;
} br_table[RDM_MAX_ENTRIES];

/* The DM table replicates an entire BR table */
/* Note: there is more than one BR in the system */
struct ocmem_dm_table {
        unsigned int offset;
        unsigned int size;
        unsigned int ddr_low;
        unsigned int ddr_high;
        unsigned int ctrl;
} dm_table[RDM_MAX_ENTRIES];

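/*
 * OCMEM_SENSORS maps to BR/DM client 1; every other client id maps to
 * client 0. Each client owns a bank of 16 mapping-table entries, so
 * client_slot_start() returns the first table slot for that client.
 */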
static inline int client_ctrl_id(int id)
{
        return (id == OCMEM_SENSORS) ? 1 : 0;
}

static inline int client_slot_start(int id)
{
        return client_ctrl_id(id) * 16;
}

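/*
 * Interrupt handler shared by the data mover and the data clear engine:
 * BIT(0) of DM_INT_STATUS signals a completed transfer, BIT(1) a completed
 * clear. The handler acknowledges the corresponding bit in DM_INTR_CLR and
 * signals the matching completion.
 */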
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
{
        unsigned status;
        unsigned irq_status;

        status = ocmem_read(dm_base + DM_GEN_STATUS);
        irq_status = ocmem_read(dm_base + DM_INT_STATUS);
        pr_debug("irq:dm_status %x irq_status %x\n", status, irq_status);
        if (irq_status & BIT(0)) {
                pr_debug("Data mover completed\n");
                ocmem_write(BIT(0), dm_base + DM_INTR_CLR);
                pr_debug("Last re-mapped address block %x\n",
                                ocmem_read(br_base + BR_LAST_ADDR));
                complete(&dm_transfer_event);
        } else if (irq_status & BIT(1)) {
                pr_debug("Data clear engine completed\n");
                ocmem_write(BIT(1), dm_base + DM_INTR_CLR);
                complete(&dm_clear_event);
        } else {
                BUG_ON(1);
        }
        return IRQ_HANDLED;
}

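/*
 * Fill [start, start + size) of OCMEM with the "OCMM" pattern using the
 * data clear engine, then block until the clear-done interrupt fires.
 * Only implemented for non-secure targets; the secure build is a no-op.
 */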
#ifdef CONFIG_MSM_OCMEM_NONSECURE
int ocmem_clear(unsigned long start, unsigned long size)
{
        INIT_COMPLETION(dm_clear_event);
        /* Clear DM Mask */
        ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
        /* Clear DM Interrupts */
        ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
        /* DM CLR offset */
        ocmem_write(start, dm_base + DM_CLR_OFFSET);
        /* DM CLR size */
        ocmem_write(size, dm_base + DM_CLR_SIZE);
        /* Wipe out memory as "OCMM" */
        ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
        /* The offset, size and pattern for clearing must be set
         * before triggering the clearing engine
         */
        mb();
        /* Trigger Data Clear */
        ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);

        wait_for_completion(&dm_clear_event);

        return 0;
}
#else
int ocmem_clear(unsigned long start, unsigned long size)
{
        return 0;
}
#endif

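/*
 * Program one BR and one DM table entry per chunk in @clist, starting at
 * OCMEM offset @start and at the client's table slot, enable the bridge,
 * then trigger the data mover and wait for its completion interrupt. The
 * OCMEM core clock is held for the duration of the transfer.
 */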
/* Lock during transfers */
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
                        unsigned long start, int direction)
{
        int num_chunks = clist->num_chunks;
        int slot = client_slot_start(id);
        int table_start = 0;
        int table_end = 0;
        int br_ctrl = 0;
        int br_id = 0;
        int client_id = 0;
        int dm_ctrl = 0;
        int i = 0;
        int j = 0;
        int status = 0;
        int rc = 0;

        rc = ocmem_enable_core_clock();

        if (rc < 0) {
                pr_err("RDM transfer failed for client %s (id: %d)\n",
                                get_name(id), id);
                return rc;
        }

        for (i = 0, j = slot; i < num_chunks; i++, j++) {
                struct ocmem_chunk *chunk = &clist->chunks[i];
                int sz = chunk->size;
                int paddr = chunk->ddr_paddr;
                int tbl_n_ctrl = 0;

                tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
                if (chunk->ro)
                        tbl_n_ctrl |= (1 << BR_RW_SHIFT);

                /* Table Entry n of BR and DM */
                ocmem_write(start, br_base + BR_TBL_n_offset(j));
                ocmem_write(sz, br_base + BR_TBL_n_size(j));
                ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
                ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));

                ocmem_write(start, dm_base + DM_TBL_n_offset(j));
                ocmem_write(sz, dm_base + DM_TBL_n_size(j));
                ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
                ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));

                start += sz;
        }

        br_id = client_ctrl_id(id);
        table_start = slot;
        table_end = slot + num_chunks - 1;
        br_ctrl |= (table_start << BR_TBL_START);
        br_ctrl |= (table_end << BR_TBL_END);

        ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
        /* Enable BR */
        ocmem_write(0x1, br_base + BR_CTRL);

        /* Compute DM Control Value */
        dm_ctrl |= (table_start << DM_TBL_START);
        dm_ctrl |= (table_end << DM_TBL_END);

        client_id = client_ctrl_id(id);
        dm_ctrl |= (client_id << DM_CLIENT_SHIFT);
        dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
        dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
        dm_ctrl |= (direction << DM_DIR_SHIFT);

        status = ocmem_read(dm_base + DM_GEN_STATUS);
        pr_debug("Transfer status before %x\n", status);
        INIT_COMPLETION(dm_transfer_event);
        /* The DM and BR tables must be programmed before triggering the
         * Data Mover; otherwise the transfer may be corrupted
         */
        mb();
        /* Trigger DM */
        ocmem_write(dm_ctrl, dm_base + DM_CTRL);
        pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);

        wait_for_completion(&dm_transfer_event);
        pr_debug("Completed transferring %d segments\n", num_chunks);
        ocmem_disable_core_clock();
        return 0;
}

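/*
 * Probe-time initialisation: cache the BR and DM register bases from the
 * platform data, register the data mover interrupt handler, initialise the
 * two completion objects and reset the DM interrupt mask and pending bits.
 */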
int ocmem_rdm_init(struct platform_device *pdev)
{
        struct ocmem_plat_data *pdata = NULL;
        int rc = 0;

        pdata = platform_get_drvdata(pdev);

        br_base = pdata->br_base;
        dm_base = pdata->dm_base;

        rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler,
                                IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata);

        if (rc) {
                dev_err(&pdev->dev, "Failed to request dm irq\n");
                return -EINVAL;
        }

        rc = ocmem_enable_core_clock();

        if (rc < 0) {
                pr_err("RDM initialization failed\n");
                return rc;
        }

        init_completion(&dm_clear_event);
        init_completion(&dm_transfer_event);
        /* Clear DM Mask */
        ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
        /* Clear pending DM interrupts */
        ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
        ocmem_disable_core_clock();
        return 0;
}
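
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * caller might drive ocmem_rdm_transfer(). It assumes an ocmem_map_list
 * whose chunks[] carry the size/ddr_paddr/ro fields used above, and it
 * assumes the client id OCMEM_GRAPHICS and direction value TO_DDR from the
 * OCMEM headers; the DDR address passed in is a hypothetical placeholder.
 *
 *	static int example_evict_to_ddr(unsigned long ocmem_offset,
 *					unsigned long ddr_paddr)
 *	{
 *		struct ocmem_map_list list;
 *
 *		list.num_chunks = 1;
 *		list.chunks[0].size = SZ_128K;
 *		list.chunks[0].ddr_paddr = ddr_paddr;
 *		list.chunks[0].ro = false;
 *
 *		return ocmem_rdm_transfer(OCMEM_GRAPHICS, &list,
 *						ocmem_offset, TO_DDR);
 *	}
 */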