/*
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};

static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};

/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64

struct ti_am335x_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

struct ti_am335x_xbar_map {
	u16 dma_line;
	u8 mux_val;
};
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
	/*
	 * The TPCC_EVT_MUX_60_63 register layout differs from the rest:
	 * event 63 is mapped to the lowest byte and event 60 to the
	 * highest, so handle those events separately.
	 */
	if (event >= 60 && event <= 63)
		writeb_relaxed(val, iomem + (63 - event % 4));
	else
		writeb_relaxed(val, iomem + event);
}
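
/*
 * Worked example of the mapping above: event 60 is written at byte
 * offset 63 (63 - 60 % 4), event 61 at 62, event 62 at 61 and event 63
 * at 60, while every other event N lands at byte offset N.
 */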

static void ti_am335x_xbar_free(struct device *dev, void *route_data)
{
	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_am335x_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
	kfree(map);
}

static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_am335x_xbar_map *map;

	if (dma_spec->args_count != 3)
		return ERR_PTR(-EINVAL);

	if (dma_spec->args[2] >= xbar->xbar_events) {
		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
			dma_spec->args[2]);
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] >= xbar->dma_requests) {
		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	map->dma_line = (u16)dma_spec->args[0];
	map->mux_val = (u8)dma_spec->args[2];

	dma_spec->args[2] = 0;
	dma_spec->args_count = 2;

	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

	return map;
}
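
/*
 * For reference, a sketch of how a consumer is expected to use the
 * AM335x crossbar (based on my reading of the ti,am335x-edma-crossbar
 * binding; the numbers are illustrative): a three-cell specifier such
 * as
 *
 *	dmas = <&edma_xbar 12 0 1>;
 *
 * requests crossbar event 1 on eDMA channel 12 (the middle cell being
 * the TC queue). route_allocate() above then programs event mux 12
 * with value 1 and shrinks the specifier to the two cells the eDMA
 * driver expects.
 */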

static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};

static int ti_am335x_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_am335x_xbar_data *xbar;
	struct resource *res;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->dma_requests = TI_AM335X_XBAR_LINES;
	}
	of_node_put(dma_node);

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->xbar_events = TI_AM335X_XBAR_LINES;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_am335x_xbar_free;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_am335x_xbar_write(xbar->iomem, i, 0);

	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
				     &xbar->dmarouter);

	return ret;
}
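
/*
 * Sketch of a matching crossbar node for the probe above, assuming the
 * layout used on AM335x (all values illustrative, cf. the
 * ti,am335x-edma-crossbar binding):
 *
 *	edma_xbar: dma-router@f90 {
 *		compatible = "ti,am335x-edma-crossbar";
 *		reg = <0xf90 0x40>;
 *		#dma-cells = <3>;
 *		dma-requests = <32>;
 *		dma-masters = <&edma>;
 *	};
 */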

/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127
#define TI_DRA7_XBAR_INPUTS	256

struct ti_dra7_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;
	struct mutex mutex;
	unsigned long *dma_inuse;

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;
};

struct ti_dra7_xbar_map {
	u16 xbar_in;
	int xbar_out;
};

static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + (xbar * 2));
}
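
/*
 * Note: each DRA7 crossbar mux register is 16 bits wide, so crossbar
 * output N lives at byte offset 2 * N from the base of the region,
 * hence the 2-byte stride above.
 */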

static void ti_dra7_xbar_free(struct device *dev, void *route_data)
{
	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_dra7_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
	mutex_lock(&xbar->mutex);
	clear_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);
	kfree(map);
}

static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Ran out of free DMA requests\n");
		kfree(map);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}
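
/*
 * Sketch of the DRA7 flow (request numbers illustrative): a consumer
 * names only the crossbar input, e.g. "dmas = <&sdma_xbar 49>;".
 * route_allocate() above then takes the first free DMA request line
 * from the bitmap, programs the mux and rewrites the specifier to that
 * line plus dma_offset; as I understand it, the offset exists because
 * sDMA counts its request lines from 1 while eDMA counts from 0 (see
 * the table below).
 */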

#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};

static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};

static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	/*
	 * Mark each line in the range as in use: a set bit in dma_inuse
	 * means the DMA request line must not be handed out or reset.
	 */
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}
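
/*
 * Sketch, assuming "ti,reserved-dma-request-ranges" holds <offset len>
 * pairs (values illustrative): a property such as
 *
 *	ti,reserved-dma-request-ranges = <92 2>, <154 1>;
 *
 * marks DMA request lines 92, 93 and 154 as in use, so the probe code
 * below neither resets them nor hands them out as routes.
 */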

static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	struct resource *res;
	u32 safe_val;
	int sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = *(u32 *)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}

static int ti_dma_xbar_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	int ret;

	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
	if (unlikely(!match))
		return -EINVAL;

	switch (*(u32 *)match->data) {
	case TI_XBAR_DRA7:
		ret = ti_dra7_xbar_probe(pdev);
		break;
	case TI_XBAR_AM335X:
		ret = ti_am335x_xbar_probe(pdev);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported crossbar\n");
		ret = -ENODEV;
		break;
	}

	return ret;
}

static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe	= ti_dma_xbar_probe,
};

static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);