/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"

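/*
 * OF translation: a consumer's three DT cells map to the hardware request
 * line (used for both src_id and dst_id), the memory-side master and the
 * peripheral-side master, per the snps,dma-spear1340 binding. An
 * illustrative consumer node (a sketch, not taken from this file):
 *
 *	serial@e0000000 {
 *		...
 *		dmas = <&dmahost 12 0 1>, <&dmahost 13 1 0>;
 *		dma-names = "rx", "tx";
 *	};
 */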
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
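/*
 * ACPI filter: build a dw_dma_slave from the acpi_dma_spec and defer to
 * the common dw_dma_filter(). Unlike the DT path, ACPI carries no master
 * information, so the masters are fixed here: memory on 0, peripheral
 * on 1.
 */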
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

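/*
 * Register this instance as an ACPI DMA controller. The generic
 * acpi_dma_simple_xlate() turns FixedDMA resource descriptors into an
 * acpi_dma_spec that dw_dma_acpi_filter() above matches against. A
 * failure here is logged but does not fail the probe.
 */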
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}

static void dw_dma_acpi_controller_free(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;

	acpi_dma_controller_free(dev);
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
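/*
 * Build platform data from device tree properties: "dma-masters" and
 * "dma-channels" are mandatory, the rest are optional tunables that
 * stay zero in the kzalloc'ed pdata when absent. Both the preferred
 * "data-width" (bytes) and the legacy "data_width" (log2 encoding)
 * spellings are accepted.
 */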
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

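/*
 * Probe order: grab the IRQ and registers, force a 32-bit DMA mask, pick
 * up platform data (board file first, DT as fallback), enable the "hclk"
 * interface clock, then hand over to the shared dw_dma_probe() core.
 * Runtime PM is enabled before the core probe so the device is managed
 * as soon as its channels exist.
 */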
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

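/*
 * Teardown runs in roughly reverse probe order: drop the ACPI and OF
 * lookup tables first so no new channels can be requested, then remove
 * the core, disable runtime PM and gate the clock.
 */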
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_free(chip->dw);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since the DMA device is powered
	 * off. Moreover, we have no way to check whether the platform is
	 * affected, so we call pm_runtime_get_sync() / pm_runtime_put()
	 * unconditionally. We can't use pm_runtime_suspended() instead,
	 * because the runtime PM framework is not fully used by this
	 * driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP

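/*
 * System sleep hooks run in the late/early phases so that client
 * drivers, which suspend in the normal phase, have already quiesced
 * their channels before the controller is disabled and its clock gated.
 */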
static int dw_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	clk_prepare_enable(chip->clk);
	return dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

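/*
 * Registered at subsys_initcall time rather than via
 * module_platform_driver() so that, in built-in configurations, the DMA
 * controller is available before client drivers start probing.
 */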
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);