/*
 * Copyright (C) 2013, Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>

#include <sound/dmaengine_pcm.h>

struct dmaengine_pcm {
	struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
	const struct snd_dmaengine_pcm_config *config;
	struct snd_soc_platform platform;
	unsigned int flags;
};

static struct dmaengine_pcm *soc_platform_to_pcm(struct snd_soc_platform *p)
{
	return container_of(p, struct dmaengine_pcm, platform);
}

static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
	struct snd_pcm_substream *substream)
{
	if (!pcm->chan[substream->stream])
		return NULL;

	return pcm->chan[substream->stream]->device->dev;
}

/**
 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
 * @substream: PCM substream
 * @params: hw_params
 * @slave_config: DMA slave config to prepare
 *
 * This function can be used as a generic prepare_slave_config callback for
 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
 * DAI DMA data. Internally the function will first call
 * snd_hwparams_to_dma_slave_config() to fill in the slave config based on the
 * hw_params, followed by snd_dmaengine_pcm_set_config_from_dai_data() to fill
 * in the remaining fields based on the DAI DMA data.
 */
int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_dmaengine_dai_dma_data *dma_data;
	int ret;

	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
	if (ret)
		return ret;

	snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
		slave_config);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
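
/*
 * Example (illustrative sketch only, not part of this file): a platform
 * driver whose CPU DAI provides snd_dmaengine_dai_dma_data can plug the
 * generic callback above into its snd_dmaengine_pcm_config. The name
 * "foo_dmaengine_pcm_config" is made up for this sketch.
 *
 *	static const struct snd_dmaengine_pcm_config foo_dmaengine_pcm_config = {
 *		.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
 *	};
 *
 * Passing a NULL config to snd_dmaengine_pcm_register() has the same effect
 * for the slave config: dmaengine_pcm_hw_params() below falls back to
 * snd_dmaengine_pcm_prepare_slave_config() when no config is supplied.
 */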

static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	int (*prepare_slave_config)(struct snd_pcm_substream *substream,
			struct snd_pcm_hw_params *params,
			struct dma_slave_config *slave_config);
	struct dma_slave_config slave_config;
	int ret;

	memset(&slave_config, 0, sizeof(slave_config));

	if (!pcm->config)
		prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
	else
		prepare_slave_config = pcm->config->prepare_slave_config;

	if (prepare_slave_config) {
		ret = prepare_slave_config(substream, params, &slave_config);
		if (ret)
			return ret;

		ret = dmaengine_slave_config(chan, &slave_config);
		if (ret)
			return ret;
	}

	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}

static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct dma_slave_caps dma_caps;
	struct snd_pcm_hardware hw;
	u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	int i, ret;

	if (pcm->config && pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	ret = dma_get_slave_caps(chan, &dma_caps);
	if (ret == 0) {
		if (dma_caps.cmd_pause)
			hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
		if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
			hw.info |= SNDRV_PCM_INFO_BATCH;

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			addr_widths = dma_caps.dstn_addr_widths;
		else
			addr_widths = dma_caps.src_addr_widths;
	}

	/*
	 * Prepare the formats mask for valid/allowed sample types. If the DMA
	 * controller does not support a given physical word size, the
	 * corresponding format is masked out so user space cannot select a
	 * format that would produce corrupted audio.
	 * If the DMA driver does not implement the slave_caps API, the default
	 * assumption is that it supports 1, 2 and 4 byte widths.
	 */
	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
		int bits = snd_pcm_format_physical_width(i);

		/* Enable only samples with DMA supported physical widths */
		switch (bits) {
		case 8:
		case 16:
		case 24:
		case 32:
		case 64:
			if (addr_widths & (1 << (bits / 8)))
				hw.formats |= (1LL << i);
			break;
		default:
			/* Unsupported types */
			break;
		}
	}

	return snd_soc_set_runtime_hwparams(substream, &hw);
}

static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	struct dma_chan *chan = pcm->chan[substream->stream];
	int ret;

	ret = dmaengine_pcm_set_runtime_hwparams(substream);
	if (ret)
		return ret;

	return snd_dmaengine_pcm_open(substream, chan);
}

static struct dma_chan *dmaengine_pcm_compat_request_channel(
	struct snd_soc_pcm_runtime *rtd,
	struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	struct snd_dmaengine_dai_dma_data *dma_data;
	dma_filter_fn fn = NULL;

	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
		return pcm->chan[0];

	if (pcm->config && pcm->config->compat_request_channel)
		return pcm->config->compat_request_channel(rtd, substream);

	if (pcm->config)
		fn = pcm->config->compat_filter_fn;

	return snd_dmaengine_pcm_request_channel(fn, dma_data->filter_data);
}

static bool dmaengine_pcm_can_report_residue(struct dma_chan *chan)
{
	struct dma_slave_caps dma_caps;
	int ret;

	ret = dma_get_slave_caps(chan, &dma_caps);
	if (ret != 0)
		return true;

	if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
		return false;

	return true;
}

static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = rtd->platform->dev;
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct snd_pcm_substream *substream;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;
	int ret;

	if (config && config->prealloc_buffer_size) {
		prealloc_buffer_size = config->prealloc_buffer_size;
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	} else {
		prealloc_buffer_size = 512 * 1024;
		max_buffer_size = SIZE_MAX;
	}

	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
		substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

		if (!pcm->chan[i] &&
		    (pcm->flags & SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME))
			pcm->chan[i] = dma_request_slave_channel(dev,
				dma_data->chan_name);

		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
				substream);
		}

		if (!pcm->chan[i]) {
			dev_err(rtd->platform->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		ret = snd_pcm_lib_preallocate_pages(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);
		if (ret)
			return ret;

		/*
		 * This will only return false if we know for sure that at
		 * least one channel does not support residue reporting. If the
		 * DMA driver does not implement the slave_caps API we rely on
		 * the NO_RESIDUE flag being set manually in case residue
		 * reporting is not supported.
		 */
		if (!dmaengine_pcm_can_report_residue(pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
	}

	return 0;
}

static snd_pcm_uframes_t dmaengine_pcm_pointer(
	struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		return snd_dmaengine_pcm_pointer_no_residue(substream);
	else
		return snd_dmaengine_pcm_pointer(substream);
}

static const struct snd_pcm_ops dmaengine_pcm_ops = {
	.open = dmaengine_pcm_open,
	.close = snd_dmaengine_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = dmaengine_pcm_hw_params,
	.hw_free = snd_pcm_lib_free_pages,
	.trigger = snd_dmaengine_pcm_trigger,
	.pointer = dmaengine_pcm_pointer,
};

static const struct snd_soc_platform_driver dmaengine_pcm_platform = {
	.component_driver = {
		.probe_order = SND_SOC_COMP_ORDER_LATE,
	},
	.ops = &dmaengine_pcm_ops,
	.pcm_new = dmaengine_pcm_new,
};

static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};

static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
	struct device *dev, const struct snd_dmaengine_pcm_config *config)
{
	unsigned int i;
	const char *name;
	struct dma_chan *chan;

	if ((pcm->flags & (SND_DMAENGINE_PCM_FLAG_NO_DT |
			   SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) ||
	    !dev->of_node)
		return 0;

	if (config && config->dma_dev) {
		/*
		 * If this warning is seen, it probably means that your Linux
		 * device structure does not match your HW device structure.
		 * It would be best to refactor the Linux device structure to
		 * correctly match the HW structure.
		 */
		dev_warn(dev, "DMA channels sourced from device %s",
			 dev_name(config->dma_dev));
		dev = config->dma_dev;
	}

	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
	     i++) {
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			name = "rx-tx";
		else
			name = dmaengine_pcm_dma_channel_names[i];
		if (config && config->chan_names[i])
			name = config->chan_names[i];
		chan = dma_request_slave_channel_reason(dev, name);
		if (IS_ERR(chan)) {
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			pcm->chan[i] = NULL;
		} else {
			pcm->chan[i] = chan;
		}
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
		pcm->chan[1] = pcm->chan[0];

	return 0;
}

static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
{
	unsigned int i;

	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
	     i++) {
		if (!pcm->chan[i])
			continue;
		dma_release_channel(pcm->chan[i]);
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}
}

/**
 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

	pcm->config = config;
	pcm->flags = flags;

	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	ret = snd_soc_add_platform(dev, &pcm->platform,
		&dmaengine_pcm_platform);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
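
/*
 * Example (illustrative sketch only, not part of this file): a CPU DAI
 * platform driver typically registers the generic PCM from its probe
 * callback. The driver name below is made up for this sketch; only the
 * snd_dmaengine_pcm_register() call and the flag come from this file.
 *
 *	static int foo_platform_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return snd_dmaengine_pcm_register(&pdev->dev, NULL,
 *				SND_DMAENGINE_PCM_FLAG_COMPAT);
 *	}
 *
 * Passing a NULL config selects the defaults implemented above: a 512 KiB
 * preallocated buffer, hardware parameters derived from the DMA slave
 * capabilities and the generic prepare_slave_config callback.
 */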

/**
 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
 * @dev: Parent device the PCM was registered with
 *
 * Removes a dmaengine based PCM device previously registered with
 * snd_dmaengine_pcm_register.
 */
void snd_dmaengine_pcm_unregister(struct device *dev)
{
	struct snd_soc_platform *platform;
	struct dmaengine_pcm *pcm;

	platform = snd_soc_lookup_platform(dev);
	if (!platform)
		return;

	pcm = soc_platform_to_pcm(platform);

	snd_soc_remove_platform(platform);
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
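
/*
 * Continuing the sketch above (hypothetical driver name): the matching
 * remove callback simply unregisters by parent device, which releases the
 * requested DMA channels and frees the dmaengine_pcm instance.
 *
 *	static int foo_platform_remove(struct platform_device *pdev)
 *	{
 *		snd_dmaengine_pcm_unregister(&pdev->dev);
 *		return 0;
 *	}
 */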

MODULE_LICENSE("GPL");