/*
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};

static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};

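/*
 * For reference, a sketch of a router node as described by the
 * ti,dra7-dma-crossbar binding; the unit address, register size and
 * request count below are illustrative, not taken from this file:
 *
 *	sdma_xbar: dma-router@4a002b78 {
 *		compatible = "ti,dra7-dma-crossbar";
 *		reg = <0x4a002b78 0xfc>;
 *		#dma-cells = <1>;
 *		dma-requests = <205>;
 *		ti,dma-safe-map = <0>;
 *		dma-masters = <&sdma>;
 *	};
 */
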
/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64

struct ti_am335x_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

struct ti_am335x_xbar_map {
	u16 dma_line;
	u16 mux_val;
};

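/*
 * Each eDMA request line has a byte-wide mux register at offset 'event';
 * only the low five bits of the selected crossbar event are written.
 */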
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
{
	writeb_relaxed(val & 0x1f, iomem + event);
}

static void ti_am335x_xbar_free(struct device *dev, void *route_data)
{
	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_am335x_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
	kfree(map);
}

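/*
 * Translate the router's three-cell spec into the two-cell spec the eDMA
 * master expects: args[0] is the DMA request line to drive and args[2]
 * the crossbar event to route onto it; args[2] is consumed here and the
 * spec is retargeted at the "dma-masters" node.
 */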
static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_am335x_xbar_map *map;

	if (dma_spec->args_count != 3)
		return ERR_PTR(-EINVAL);

	if (dma_spec->args[2] >= xbar->xbar_events) {
		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
			dma_spec->args[2]);
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] >= xbar->dma_requests) {
		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	map->dma_line = (u16)dma_spec->args[0];
	map->mux_val = (u16)dma_spec->args[2];

	dma_spec->args[2] = 0;
	dma_spec->args_count = 2;

	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

	return map;
}

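/*
 * Consumer sketch (illustrative values, following the
 * ti,am335x-edma-crossbar binding), routing crossbar events 1 and 2 to
 * two eDMA channels:
 *
 *	&mmc3 {
 *		dmas = <&edma_xbar 12 0 1>, <&edma_xbar 13 0 2>;
 *		dma-names = "tx", "rx";
 *	};
 */
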
static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};

static int ti_am335x_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_am335x_xbar_data *xbar;
	struct resource *res;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->dma_requests = TI_AM335X_XBAR_LINES;
	}
	of_node_put(dma_node);

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->xbar_events = TI_AM335X_XBAR_LINES;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_am335x_xbar_free;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_am335x_xbar_write(xbar->iomem, i, 0);

	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
				     &xbar->dmarouter);

	return ret;
}

/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127
#define TI_DRA7_XBAR_INPUTS	256

struct ti_dra7_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;
	struct mutex mutex;
	unsigned long *dma_inuse;

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;
};

struct ti_dra7_xbar_map {
	u16 xbar_in;
	int xbar_out;
};

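/*
 * Each crossbar output has a 16-bit mux register; the register for
 * output 'xbar' lives at byte offset xbar * 2.
 */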
static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + (xbar * 2));
}

static void ti_dra7_xbar_free(struct device *dev, void *route_data)
{
	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_dra7_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
	mutex_lock(&xbar->mutex);
	clear_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);
	kfree(map);
}

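/*
 * Unlike on AM335x, the DMA request line towards the master is chosen at
 * runtime: the first line still free in the dma_inuse bitmap is claimed
 * and the crossbar input named by the consumer's one-cell spec is routed
 * to it.
 */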
static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Ran out of free DMA requests\n");
		kfree(map);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}

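/*
 * Request-line offset applied when rewriting the consumer's spec for the
 * master: sDMA request lines are numbered from 1 in its binding, eDMA's
 * from 0.
 */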
#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};

static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};

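/*
 * Mark a range of DMA request lines as in-use so they are never handed
 * out to consumers or reset at probe time; ranges come from the optional
 * "ti,reserved-dma-request-ranges" property as <offset length> pairs.
 */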
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	struct resource *res;
	u32 safe_val;
	int sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = *(u32 *)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}

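/*
 * Top-level probe: dispatch to the SoC-specific probe selected by the
 * compatible's match data.
 */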
static int ti_dma_xbar_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	int ret;

	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
	if (unlikely(!match))
		return -EINVAL;

	switch (*(u32 *)match->data) {
	case TI_XBAR_DRA7:
		ret = ti_dra7_xbar_probe(pdev);
		break;
	case TI_XBAR_AM335X:
		ret = ti_am335x_xbar_probe(pdev);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported crossbar\n");
		ret = -ENODEV;
		break;
	}

	return ret;
}

static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe	= ti_dma_xbar_probe,
};

static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);