/*
 * Generic TXx9 ACLC platform driver
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * Based on RBTX49xx patch from CELF patch archive.
 * (C) Copyright TOSHIBA CORPORATION 2004-2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "txx9aclc.h"

static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
	/*
	 * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
	 * needs more work for noncoherent MIPS.
	 */
	.info		  = SNDRV_PCM_INFO_INTERLEAVED |
			    SNDRV_PCM_INFO_BATCH |
			    SNDRV_PCM_INFO_PAUSE,
#ifdef __BIG_ENDIAN
	.formats	  = SNDRV_PCM_FMTBIT_S16_BE,
#else
	.formats	  = SNDRV_PCM_FMTBIT_S16_LE,
#endif
	.period_bytes_min = 1024,
	.period_bytes_max = 8 * 1024,
	.periods_min	  = 2,
	.periods_max	  = 4096,
	.buffer_bytes_max = 32 * 1024,
};

static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;
	int ret;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	dev_dbg(socdev->dev,
		"runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd "
		"runtime->min_align %ld\n",
		(unsigned long)runtime->dma_area,
		(unsigned long)runtime->dma_addr, runtime->dma_bytes,
		runtime->min_align);
	dev_dbg(socdev->dev,
		"periods %d period_bytes %d stream %d\n",
		params_periods(params), params_period_bytes(params),
		substream->stream);

	dmadata->substream = substream;
	dmadata->pos = 0;
	return 0;
}

static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

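/*
 * Split the DMA buffer into the fragments the descriptor chain works on.
 * When the buffer holds only a single period, use two half-period
 * fragments so that two descriptors can always stay queued; otherwise
 * use one fragment per period.
 */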
static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct txx9aclc_dmadata *dmadata = runtime->private_data;

	dmadata->dma_addr = runtime->dma_addr;
	dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
	dmadata->period_bytes = snd_pcm_lib_period_bytes(substream);

	if (dmadata->buffer_bytes == dmadata->period_bytes) {
		dmadata->frag_bytes = dmadata->period_bytes >> 1;
		dmadata->frags = 2;
	} else {
		dmadata->frag_bytes = dmadata->period_bytes;
		dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes;
	}
	dmadata->frag_count = 0;
	dmadata->pos = 0;
	return 0;
}

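/*
 * DMA completion callback.  A negative frag_count means the stream has
 * been stopped or reset, so nothing more is queued; otherwise account for
 * the finished descriptor and let the tasklet submit the next fragment.
 */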
static void txx9aclc_dma_complete(void *arg)
{
	struct txx9aclc_dmadata *dmadata = arg;
	unsigned long flags;

	/* dma completion handler cannot submit new operations */
	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count >= 0) {
		dmadata->dmacount--;
		BUG_ON(dmadata->dmacount < 0);
		tasklet_schedule(&dmadata->tasklet);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

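/*
 * Queue one fragment on the slave DMA channel as a single-entry
 * scatterlist, with txx9aclc_dma_complete() as the completion callback.
 * Returns the descriptor, or NULL if the channel could not prepare it.
 */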
static struct dma_async_tx_descriptor *
txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
{
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
		    dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
	sg_dma_address(&sg) = buf_dma_addr;
	desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
		dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		DMA_TO_DEVICE : DMA_FROM_DEVICE,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
		return NULL;
	}
	desc->callback = txx9aclc_dma_complete;
	desc->callback_param = dmadata;
	desc->tx_submit(desc);
	return desc;
}

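/* Number of fragment descriptors kept in flight on the DMA channel. */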
#define NR_DMA_CHAIN 2

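/*
 * Refill tasklet.  On the first run after TRIGGER_START (frag_count < 0)
 * it terminates any stale transfers, queues NR_DMA_CHAIN fragments and
 * enables the ACLC FIFO DMA; on later runs, scheduled from the completion
 * callback, it tops the chain back up, advances the buffer position and
 * reports elapsed periods to the PCM core.
 */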
static void txx9aclc_dma_tasklet(unsigned long data)
{
	struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
	struct dma_chan *chan = dmadata->dma_chan;
	struct dma_async_tx_descriptor *desc;
	struct snd_pcm_substream *substream = dmadata->substream;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	if (dmadata->frag_count < 0) {
		struct txx9aclc_soc_device *dev =
			container_of(dmadata, struct txx9aclc_soc_device,
				     dmadata[substream->stream]);
		struct txx9aclc_plat_drvdata *drvdata =
			txx9aclc_get_plat_drvdata(dev);
		void __iomem *base = drvdata->base;

		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		chan->device->device_terminate_all(chan);
		/* first time */
		for (i = 0; i < NR_DMA_CHAIN; i++) {
			desc = txx9aclc_dma_submit(dmadata,
				dmadata->dma_addr + i * dmadata->frag_bytes);
			if (!desc)
				return;
		}
		dmadata->dmacount = NR_DMA_CHAIN;
		chan->device->device_issue_pending(chan);
		spin_lock_irqsave(&dmadata->dma_lock, flags);
		__raw_writel(ctlbit, base + ACCTLEN);
		dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		return;
	}
	BUG_ON(dmadata->dmacount >= NR_DMA_CHAIN);
	while (dmadata->dmacount < NR_DMA_CHAIN) {
		dmadata->dmacount++;
		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
		desc = txx9aclc_dma_submit(dmadata,
			dmadata->dma_addr +
			dmadata->frag_count * dmadata->frag_bytes);
		if (!desc)
			return;
		chan->device->device_issue_pending(chan);

		spin_lock_irqsave(&dmadata->dma_lock, flags);
		dmadata->frag_count++;
		dmadata->frag_count %= dmadata->frags;
		dmadata->pos += dmadata->frag_bytes;
		dmadata->pos %= dmadata->buffer_bytes;
		if ((dmadata->frag_count * dmadata->frag_bytes) %
		    dmadata->period_bytes == 0)
			snd_pcm_period_elapsed(substream);
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

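/*
 * TRIGGER_START only marks frag_count as -1 and defers the actual chain
 * setup to the tasklet; the other commands toggle the stream's FIFO DMA
 * bit through the ACCTLEN/ACCTLDIS registers.
 */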
static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct txx9aclc_soc_device *dev =
		container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
	void __iomem *base = drvdata->base;
	unsigned long flags;
	int ret = 0;
	u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		ACCTL_AUDODMA : ACCTL_AUDIDMA;

	spin_lock_irqsave(&dmadata->dma_lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		dmadata->frag_count = -1;
		tasklet_schedule(&dmadata->tasklet);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		__raw_writel(ctlbit, base + ACCTLDIS);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		__raw_writel(ctlbit, base + ACCTLEN);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&dmadata->dma_lock, flags);
	return ret;
}

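/* Report the position last advanced by the tasklet, in frames. */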
static snd_pcm_uframes_t
txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;

	return bytes_to_frames(substream->runtime, dmadata->pos);
}

static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct txx9aclc_soc_device *dev =
		container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
	struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
	int ret;

	ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware);
	if (ret)
		return ret;
	/* ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;
	substream->runtime->private_data = dmadata;
	return 0;
}

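/* Mark the stream stopped and cancel any descriptors still in flight. */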
static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
{
	struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
	struct dma_chan *chan = dmadata->dma_chan;

	dmadata->frag_count = -1;
	chan->device->device_terminate_all(chan);
	return 0;
}

static struct snd_pcm_ops txx9aclc_pcm_ops = {
	.open		= txx9aclc_pcm_open,
	.close		= txx9aclc_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= txx9aclc_pcm_hw_params,
	.hw_free	= txx9aclc_pcm_hw_free,
	.prepare	= txx9aclc_pcm_prepare,
	.trigger	= txx9aclc_pcm_trigger,
	.pointer	= txx9aclc_pcm_pointer,
};

static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

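/* Preallocate DMA buffers for all substreams: 64 KiB up front, 4 MiB max. */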
static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
			    struct snd_pcm *pcm)
{
	return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
		card->dev, 64 * 1024, 4 * 1024 * 1024);
}

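/*
 * dma_request_channel() filter: match the dmaengine device whose name is
 * built from the DMA resource ("<name>.<start>") and hand it the txx9dmac
 * slave configuration through chan->private.
 */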
static bool filter(struct dma_chan *chan, void *param)
{
	struct txx9aclc_dmadata *dmadata = param;
	char *devname;
	bool found = false;

	devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
		(int)dmadata->dma_res->start);
	if (strcmp(dev_name(chan->device->dev), devname) == 0) {
		chan->private = &dmadata->dma_slave;
		found = true;
	}
	kfree(devname);
	return found;
}

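/*
 * Set up the txx9dmac slave parameters (AC-link FIFO data register and
 * 32-bit register width) for the stream direction, request a DMA_SLAVE
 * channel through the filter above and initialize the refill tasklet.
 */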
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
			     struct txx9aclc_dmadata *dmadata)
{
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
	struct txx9dmac_slave *ds = &dmadata->dma_slave;
	dma_cap_mask_t mask;

	spin_lock_init(&dmadata->dma_lock);

	ds->reg_width = sizeof(u32);
	if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ds->tx_reg = drvdata->physbase + ACAUDODAT;
		ds->rx_reg = 0;
	} else {
		ds->tx_reg = 0;
		ds->rx_reg = drvdata->physbase + ACAUDIDAT;
	}

	/* Try to grab a DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dmadata->dma_chan = dma_request_channel(mask, filter, dmadata);
	if (!dmadata->dma_chan) {
		dev_err(dev->soc_dev.dev,
			"DMA channel for %s is not available\n",
			dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			"playback" : "capture");
		return -EBUSY;
	}
	tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
		     (unsigned long)dmadata);
	return 0;
}

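/*
 * Grab the ACLC platform device's two IORESOURCE_DMA resources (playback
 * and capture) and set up a DMA channel for each; on failure release
 * whatever was acquired.
 */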
static int txx9aclc_pcm_probe(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct txx9aclc_soc_device *dev =
		container_of(socdev, struct txx9aclc_soc_device, soc_dev);
	struct resource *r;
	int i;
	int ret;

	dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK;
	dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE;
	for (i = 0; i < 2; i++) {
		r = platform_get_resource(dev->aclc_pdev, IORESOURCE_DMA, i);
		if (!r) {
			ret = -EBUSY;
			goto exit;
		}
		dev->dmadata[i].dma_res = r;
		ret = txx9aclc_dma_init(dev, &dev->dmadata[i]);
		if (ret)
			goto exit;
	}
	return 0;

exit:
	for (i = 0; i < 2; i++) {
		if (dev->dmadata[i].dma_chan)
			dma_release_channel(dev->dmadata[i].dma_chan);
		dev->dmadata[i].dma_chan = NULL;
	}
	return ret;
}

static int txx9aclc_pcm_remove(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct txx9aclc_soc_device *dev =
		container_of(socdev, struct txx9aclc_soc_device, soc_dev);
	struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
	void __iomem *base = drvdata->base;
	int i;

	/* disable all FIFO DMAs */
	__raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS);
	/* dummy R/W to clear pending DMAREQ if any */
	__raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT);

	for (i = 0; i < 2; i++) {
		struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
		struct dma_chan *chan = dmadata->dma_chan;
		if (chan) {
			dmadata->frag_count = -1;
			chan->device->device_terminate_all(chan);
			dma_release_channel(chan);
		}
		dev->dmadata[i].dma_chan = NULL;
	}
	return 0;
}

struct snd_soc_platform txx9aclc_soc_platform = {
	.name		= "txx9aclc-audio",
	.probe		= txx9aclc_pcm_probe,
	.remove		= txx9aclc_pcm_remove,
	.pcm_ops	= &txx9aclc_pcm_ops,
	.pcm_new	= txx9aclc_pcm_new,
	.pcm_free	= txx9aclc_pcm_free_dma_buffers,
};
EXPORT_SYMBOL_GPL(txx9aclc_soc_platform);

static int __init txx9aclc_soc_platform_init(void)
{
	return snd_soc_register_platform(&txx9aclc_soc_platform);
}

static void __exit txx9aclc_soc_platform_exit(void)
{
	snd_soc_unregister_platform(&txx9aclc_soc_platform);
}

module_init(txx9aclc_soc_platform_init);
module_exit(txx9aclc_soc_platform_exit);

MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver");
MODULE_LICENSE("GPL");