/*
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
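/*
 * Driver for the DMA request crossbars found on TI DRA7 and
 * AM335x/AM437x family SoCs. The crossbar sits between the DMA request
 * sources and the DMA controller; this driver programs it on demand when
 * a channel is requested through the dmaengine DT binding.
 */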
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1

static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = (void *)TI_XBAR_DRA7,
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = (void *)TI_XBAR_AM335X,
	},
	{},
};

/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64

struct ti_am335x_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

struct ti_am335x_xbar_map {
	u16 dma_line;
	u16 mux_val;
};

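/*
 * Each eDMA event has a byte wide mux register at offset 'event'; writing
 * the crossbar input number there routes that input to the event. Only the
 * low five bits of the mux value are significant on this family.
 */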
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
{
	writeb_relaxed(val & 0x1f, iomem + event);
}

static void ti_am335x_xbar_free(struct device *dev, void *route_data)
{
	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_am335x_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
	kfree(map);
}

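/*
 * The client's dma-spec carries three cells: the eDMA request line to use,
 * a cell passed through untouched to the eDMA master, and the crossbar
 * event number to route. The router programs the mux, then rewrites the
 * spec to the two-cell form the eDMA master expects.
 */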
static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_am335x_xbar_map *map;

	if (dma_spec->args_count != 3)
		return ERR_PTR(-EINVAL);

	if (dma_spec->args[2] >= xbar->xbar_events) {
		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
			dma_spec->args[2]);
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] >= xbar->dma_requests) {
		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	map->dma_line = (u16)dma_spec->args[0];
	map->mux_val = (u16)dma_spec->args[2];

	dma_spec->args[2] = 0;
	dma_spec->args_count = 2;

	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

	return map;
}

static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};

static int ti_am335x_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_am335x_xbar_data *xbar;
	struct resource *res;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		/* Drop the reference taken by of_parse_phandle() */
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->dma_requests = TI_AM335X_XBAR_LINES;
	}
	of_node_put(dma_node);

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->xbar_events = TI_AM335X_XBAR_LINES;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_am335x_xbar_free;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_am335x_xbar_write(xbar->iomem, i, 0);

	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
				     &xbar->dmarouter);

	return ret;
}

/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127
#define TI_DRA7_XBAR_INPUTS	256

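/*
 * Offset added to the selected crossbar output to form the DMA request
 * number handed to the master: sDMA request lines are numbered from 1 in
 * DT, while eDMA lines are numbered from 0.
 */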
#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1

struct ti_dra7_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;
	struct mutex mutex; /* protects dma_inuse */
	unsigned long *dma_inuse;

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;
};

struct ti_dra7_xbar_map {
	u16 xbar_in;
	int xbar_out;
};

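/*
 * Each crossbar output has a 16-bit mux register at offset 2 * output;
 * writing the input number there routes that crossbar input to the output.
 */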
static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + (xbar * 2));
}

static void ti_dra7_xbar_free(struct device *dev, void *route_data)
{
	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_dra7_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
	mutex_lock(&xbar->mutex);
	clear_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);
	kfree(map);
}

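/*
 * Unlike the AM335x variant, the DRA7 router picks the DMA request line
 * itself: it claims the first unused output from the dma_inuse bitmap and
 * rewrites args[0] to that line (plus the master-specific offset).
 */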
static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Find and claim a free output line while holding the mutex, so two
	 * concurrent requests cannot race and grab the same line.
	 */
	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		kfree(map);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}

static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = (void *)TI_XBAR_SDMA_OFFSET,
	},
	{
		.compatible = "ti,edma3",
		.data = (void *)TI_XBAR_EDMA_OFFSET,
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = (void *)TI_XBAR_EDMA_OFFSET,
	},
	{},
};

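/*
 * Mark 'len' crossbar output lines starting at 'offset' as in use, so that
 * they are never handed out to clients or touched when the crossbar is
 * reset.
 */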
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	struct resource *res;
	u32 safe_val;
	size_t sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		/* Drop the reference taken by of_parse_phandle() */
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	/*
	 * Each "ti,reserved-dma-request-ranges" entry is an (offset, length)
	 * pair of DMA request lines to keep out of the allocator's hands.
	 */
	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = (u32)(uintptr_t)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}

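/*
 * Top level probe: look up the matched compatible and hand over to the
 * SoC-specific probe routine.
 */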
static int ti_dma_xbar_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	int ret;

	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
	if (unlikely(!match))
		return -EINVAL;

	switch ((u32)(uintptr_t)match->data) {
	case TI_XBAR_DRA7:
		ret = ti_dra7_xbar_probe(pdev);
		break;
	case TI_XBAR_AM335X:
		ret = ti_am335x_xbar_probe(pdev);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported crossbar\n");
		ret = -ENODEV;
		break;
	}

	return ret;
}

static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe	= ti_dma_xbar_probe,
};

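/*
 * Registered at arch_initcall time so the crossbar router is available
 * before the DMA masters and their clients start probing.
 */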
static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);