/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"

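/*
 * Translate a device tree DMA specifier into a channel for the requesting
 * client. The three specifier cells are used here as: the hardware request
 * line (programmed as both src_id and dst_id), the memory-side master
 * interface and the peripheral-side master interface. Purely as an
 * illustration (node names and cell values are platform specific), a client
 * node could reference this controller with:
 *
 *	dmas = <&dmahost 12 0 1>, <&dmahost 13 1 0>;
 *	dma-names = "rx", "tx";
 */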
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
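/*
 * Filter callback used by the ACPI DMA helpers: it rebuilds a dw_dma_slave
 * from the request line the firmware reported (dma_spec->slave_id) and
 * matches it against the channels of this controller. The master interfaces
 * are not described by ACPI, so memory master 0 and peripheral master 1 are
 * assumed here.
 */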
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
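/*
 * Build platform data from the controller's device tree node. "dma-masters"
 * and "dma-channels" are required; the remaining properties are optional.
 * Bus widths are taken from "data-width" (in bytes) or, failing that, from
 * the legacy "data_width" property, which encodes the width as a power of
 * two and is converted with BIT().
 */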
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

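/*
 * Probe path: allocate the chip descriptor, pick up the interrupt and MMIO
 * resources, constrain DMA addressing to 32 bits, take platform data from
 * the board file or from the device tree, enable the "hclk" clock and
 * runtime PM, hand everything to dw_dma_probe() and finally expose the
 * controller to DT and/or ACPI clients.
 */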
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip, pdata);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

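/*
 * Tear down in reverse order of probe: drop the OF DMA controller hook (the
 * ACPI one is devres-managed), remove the dmaengine device and turn off
 * runtime PM and the clock.
 */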
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that because the DMA device is already
	 * powered off, and we have no way to check whether the platform at
	 * hand is affected. That's why the pm_runtime_get_sync() /
	 * pm_runtime_put_sync_suspend() pair is called unconditionally. We
	 * can't rely on pm_runtime_suspended() either, because the runtime
	 * PM framework is not fully used by this driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP

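/*
 * System sleep is wired up via SET_LATE_SYSTEM_SLEEP_PM_OPS below, so the
 * controller is quiesced in the late suspend phase, after client drivers
 * have gone through their normal suspend callbacks, and brought back in the
 * early resume phase, before they resume.
 */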
static int dw_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	clk_prepare_enable(chip->clk);
	return dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

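/*
 * Registered at subsys_initcall time rather than via module_platform_driver()
 * so that, in built-in configurations, the DMA controller comes up before
 * the client drivers that depend on it start probing.
 */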
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);