blob: b5b7e54ac845b6a4a020abcfd4210fef4da934f9 [file] [log] [blame]
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +09001/*
2 * Renesas SUDMAC support
3 *
4 * Copyright (C) 2013 Renesas Solutions Corp.
5 *
6 * based on drivers/dma/sh/shdma.c:
7 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
8 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
9 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
10 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
11 *
12 * This is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 */
16
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +090017#include <linux/dmaengine.h>
Laurent Pinchartcf5a23b2014-05-13 01:02:13 +020018#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +090021#include <linux/platform_device.h>
Laurent Pinchartcf5a23b2014-05-13 01:02:13 +020022#include <linux/slab.h>
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +090023#include <linux/sudmac.h>
24
/* Per-channel state: the shdma core channel plus SUDMAC register context. */
struct sudmac_chan {
	struct shdma_chan shdma_chan;	/* embedded shdma core channel */
	void __iomem *base;		/* mapped SUDMAC register block */
	char dev_id[16]; /* unique name per DMAC of channel */

	u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */
	u32 cfg;			/* cached CHnCFG configuration value */
	u32 dint_end_bit;		/* DINT* transfer-end bit(s) for this channel */
};
34
/* One SUDMAC instance: shdma core device, platform data, mapped registers. */
struct sudmac_device {
	struct shdma_dev shdma_dev;	/* embedded shdma core device */
	struct sudmac_pdata *pdata;	/* board-supplied channel description */
	void __iomem *chan_reg;		/* ioremapped SUDMAC register window */
};
40
/* Hardware parameters of one transfer, as written to CHnBA/CHnBBC. */
struct sudmac_regs {
	u32 base_addr;		/* CHnBA: buffer base address */
	u32 base_byte_count;	/* CHnBBC: transfer length in bytes */
};
45
/* Transfer descriptor: hardware values plus the embedded core descriptor. */
struct sudmac_desc {
	struct sudmac_regs hw;		/* register values for this transfer */
	struct shdma_desc shdma_desc;	/* embedded shdma core descriptor */
};
50
/* Conversions between shdma core objects and their SUDMAC containers */
#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				 struct sudmac_device, shdma_dev.dma_dev)

/* SUDMAC register */
#define SUDMAC_CH0CFG		0x00	/* channel configuration */
#define SUDMAC_CH0BA		0x10	/* buffer base address */
#define SUDMAC_CH0BBC		0x18	/* buffer byte count */
#define SUDMAC_CH0CA		0x20	/* current address */
#define SUDMAC_CH0CBC		0x28	/* current byte count (remaining) */
#define SUDMAC_CH0DEN		0x30	/* DMA enable */
#define SUDMAC_DSTSCLR		0x38
#define SUDMAC_DBUFCTRL		0x3C
#define SUDMAC_DINTCTRL		0x40	/* DMA interrupt enable */
#define SUDMAC_DINTSTS		0x44	/* DMA interrupt status */
#define SUDMAC_DINTSTSCLR	0x48	/* DMA interrupt status clear */
#define SUDMAC_CH0SHCTRL	0x50

/* Definitions for the sudmac_channel.config */
#define SUDMAC_SENDBUFM	0x1000 /* b12: Transmit Buffer Mode */
#define SUDMAC_RCVENDM	0x0100 /* b8: Receive Data Transfer End Mode */
#define SUDMAC_LBA_WAIT	0x0030 /* b5-4: Local Bus Access Wait */

/* Definitions for the sudmac_channel.dint_end_bit */
#define SUDMAC_CH1ENDE	0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
#define SUDMAC_CH0ENDE	0x0001 /* b0: Ch0 DMA Transfer End Int Enable */

#define SUDMAC_DRV_NAME "sudmac"
80
81static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
82{
83 iowrite32(data, sc->base + reg);
84}
85
86static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
87{
88 return ioread32(sc->base + reg);
89}
90
91static bool sudmac_is_busy(struct sudmac_chan *sc)
92{
93 u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
94
95 if (den)
96 return true; /* working */
97
98 return false; /* waiting */
99}
100
/*
 * Program one transfer into the channel registers: configuration,
 * buffer base address, then byte count.  @sdesc is unused here but
 * keeps the signature parallel to the descriptor-based call site.
 */
static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
			   struct shdma_desc *sdesc)
{
	sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
	sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
	sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
}
108
109static void sudmac_start(struct sudmac_chan *sc)
110{
111 u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
112
113 sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
114 sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
115}
116
117static void sudmac_start_xfer(struct shdma_chan *schan,
118 struct shdma_desc *sdesc)
119{
120 struct sudmac_chan *sc = to_chan(schan);
121 struct sudmac_desc *sd = to_desc(sdesc);
122
123 sudmac_set_reg(sc, &sd->hw, sdesc);
124 sudmac_start(sc);
125}
126
127static bool sudmac_channel_busy(struct shdma_chan *schan)
128{
129 struct sudmac_chan *sc = to_chan(schan);
130
131 return sudmac_is_busy(sc);
132}
133
/* shdma_ops hook: SUDMAC needs no per-slave setup before a transfer. */
static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}
137
138static const struct sudmac_slave_config *sudmac_find_slave(
139 struct sudmac_chan *sc, int slave_id)
140{
141 struct sudmac_device *sdev = to_sdev(sc);
142 struct sudmac_pdata *pdata = sdev->pdata;
143 const struct sudmac_slave_config *cfg;
144 int i;
145
146 for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
147 if (cfg->slave_id == slave_id)
148 return cfg;
149
150 return NULL;
151}
152
Guennadi Liakhovetski4981c4d2013-08-02 16:50:36 +0200153static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
154 dma_addr_t slave_addr, bool try)
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900155{
156 struct sudmac_chan *sc = to_chan(schan);
157 const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
158
159 if (!cfg)
160 return -ENODEV;
161
162 return 0;
163}
164
/*
 * Stop the channel: disable DMA (DEN), mask its transfer-end interrupt
 * in DINTCTRL, then clear any pending end status via DINTSTSCLR.
 * NOTE(review): the disable-mask-clear ordering is inferred from this
 * write sequence — confirm against the SUDMAC datasheet before reordering.
 */
static inline void sudmac_dma_halt(struct sudmac_chan *sc)
{
	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);

	sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
	sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
	sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
}
173
174static int sudmac_desc_setup(struct shdma_chan *schan,
175 struct shdma_desc *sdesc,
176 dma_addr_t src, dma_addr_t dst, size_t *len)
177{
178 struct sudmac_chan *sc = to_chan(schan);
179 struct sudmac_desc *sd = to_desc(sdesc);
180
Laurent Pinchart42e4a122013-12-11 15:29:15 +0100181 dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
182 __func__, &src, &dst, *len);
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900183
184 if (*len > schan->max_xfer_len)
185 *len = schan->max_xfer_len;
186
187 if (dst)
188 sd->hw.base_addr = dst;
189 else if (src)
190 sd->hw.base_addr = src;
191 sd->hw.base_byte_count = *len;
192
193 return 0;
194}
195
/* shdma_ops hook: stop the channel immediately. */
static void sudmac_halt(struct shdma_chan *schan)
{
	sudmac_dma_halt(to_chan(schan));
}
202
203static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
204{
205 struct sudmac_chan *sc = to_chan(schan);
206 u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);
207
208 if (!(dintsts & sc->dint_end_bit))
209 return false;
210
211 /* DMA stop */
212 sudmac_dma_halt(sc);
213
214 return true;
215}
216
217static size_t sudmac_get_partial(struct shdma_chan *schan,
218 struct shdma_desc *sdesc)
219{
220 struct sudmac_chan *sc = to_chan(schan);
221 struct sudmac_desc *sd = to_desc(sdesc);
222 u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);
223
224 return sd->hw.base_byte_count - current_byte_count;
225}
226
227static bool sudmac_desc_completed(struct shdma_chan *schan,
228 struct shdma_desc *sdesc)
229{
230 struct sudmac_chan *sc = to_chan(schan);
231 struct sudmac_desc *sd = to_desc(sdesc);
232 u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);
233
234 return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
235}
236
237static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
238 unsigned long flags)
239{
240 struct shdma_dev *sdev = &su_dev->shdma_dev;
241 struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
242 struct sudmac_chan *sc;
243 struct shdma_chan *schan;
244 int err;
245
246 sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
247 if (!sc) {
248 dev_err(sdev->dma_dev.dev,
249 "No free memory for allocating dma channels!\n");
250 return -ENOMEM;
251 }
252
253 schan = &sc->shdma_chan;
254 schan->max_xfer_len = 64 * 1024 * 1024 - 1;
255
256 shdma_chan_probe(sdev, schan, id);
257
258 sc->base = su_dev->chan_reg;
259
260 /* get platform_data */
261 sc->offset = su_dev->pdata->channel->offset;
262 if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
263 sc->cfg |= SUDMAC_SENDBUFM;
264 if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
265 sc->cfg |= SUDMAC_RCVENDM;
266 sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;
267
268 if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
269 sc->dint_end_bit |= SUDMAC_CH0ENDE;
270 if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
271 sc->dint_end_bit |= SUDMAC_CH1ENDE;
272
273 /* set up channel irq */
274 if (pdev->id >= 0)
275 snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
276 pdev->id, id);
277 else
278 snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);
279
280 err = shdma_request_irq(schan, irq, flags, sc->dev_id);
281 if (err) {
282 dev_err(sdev->dma_dev.dev,
283 "DMA channel %d request_irq failed %d\n", id, err);
284 goto err_no_irq;
285 }
286
287 return 0;
288
289err_no_irq:
290 /* remove from dmaengine device node */
291 shdma_chan_remove(schan);
292 return err;
293}
294
/* Unregister every channel from the shdma core and reset the count. */
static void sudmac_chan_remove(struct sudmac_device *su_dev)
{
	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}
308
/* shdma_ops hook: SUDMAC takes the buffer address from the descriptor. */
static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
{
	/* SUDMAC doesn't need the address */
	return 0;
}
314
315static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
316{
317 return &((struct sudmac_desc *)buf)[i].shdma_desc;
318}
319
/* Callbacks the shdma core uses to drive the SUDMAC hardware. */
static const struct shdma_ops sudmac_shdma_ops = {
	.desc_completed = sudmac_desc_completed,
	.halt_channel = sudmac_halt,
	.channel_busy = sudmac_channel_busy,
	.slave_addr = sudmac_slave_addr,
	.desc_setup = sudmac_desc_setup,
	.set_slave = sudmac_set_slave,
	.setup_xfer = sudmac_setup_xfer,
	.start_xfer = sudmac_start_xfer,
	.embedded_desc = sudmac_embedded_desc,
	.chan_irq = sudmac_chan_irq,
	.get_partial = sudmac_get_partial,
};
333
334static int sudmac_probe(struct platform_device *pdev)
335{
Jingoo Hand4adcc02013-07-30 17:09:11 +0900336 struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900337 int err, i;
338 struct sudmac_device *su_dev;
339 struct dma_device *dma_dev;
340 struct resource *chan, *irq_res;
341
342 /* get platform data */
343 if (!pdata)
344 return -ENODEV;
345
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900346 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
Julia Lawall4770ee42013-08-19 13:20:38 +0200347 if (!irq_res)
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900348 return -ENODEV;
349
350 err = -ENOMEM;
351 su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
352 GFP_KERNEL);
353 if (!su_dev) {
354 dev_err(&pdev->dev, "Not enough memory\n");
355 return err;
356 }
357
358 dma_dev = &su_dev->shdma_dev.dma_dev;
359
Julia Lawall4770ee42013-08-19 13:20:38 +0200360 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
361 su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
362 if (IS_ERR(su_dev->chan_reg))
363 return PTR_ERR(su_dev->chan_reg);
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900364
365 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
366
367 su_dev->shdma_dev.ops = &sudmac_shdma_ops;
368 su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
369 err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
370 if (err < 0)
371 return err;
372
373 /* platform data */
Jingoo Hand4adcc02013-07-30 17:09:11 +0900374 su_dev->pdata = dev_get_platdata(&pdev->dev);
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900375
376 platform_set_drvdata(pdev, su_dev);
377
378 /* Create DMA Channel */
379 for (i = 0; i < pdata->channel_num; i++) {
380 err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
381 if (err)
382 goto chan_probe_err;
383 }
384
385 err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
386 if (err < 0)
387 goto chan_probe_err;
388
389 return err;
390
391chan_probe_err:
392 sudmac_chan_remove(su_dev);
393
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900394 shdma_cleanup(&su_dev->shdma_dev);
395
396 return err;
397}
398
399static int sudmac_remove(struct platform_device *pdev)
400{
401 struct sudmac_device *su_dev = platform_get_drvdata(pdev);
402 struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
403
404 dma_async_device_unregister(dma_dev);
405 sudmac_chan_remove(su_dev);
406 shdma_cleanup(&su_dev->shdma_dev);
Shimoda, Yoshihiro18a10532013-04-23 20:00:12 +0900407
408 return 0;
409}
410
411static struct platform_driver sudmac_driver = {
412 .driver = {
413 .owner = THIS_MODULE,
414 .name = SUDMAC_DRV_NAME,
415 },
416 .probe = sudmac_probe,
417 .remove = sudmac_remove,
418};
419module_platform_driver(sudmac_driver);
420
421MODULE_AUTHOR("Yoshihiro Shimoda");
422MODULE_DESCRIPTION("Renesas SUDMAC driver");
423MODULE_LICENSE("GPL v2");
424MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);