blob: c1b6aa551ce123c037860042e0588c418a2facea [file] [log] [blame]
/*
 * Greybus audio Pulse Code Modulation (PCM) driver
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9
John Stultza4749bb2015-05-08 12:57:36 -070010#include <linux/kernel.h>
11#include <linux/device.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/workqueue.h>
16#include <linux/i2c.h>
17#include <sound/core.h>
18#include <sound/pcm.h>
19#include <sound/pcm_params.h>
20#include <sound/soc.h>
21#include <sound/dmaengine_pcm.h>
22#include <sound/simple_card.h>
Alex Eldere9385e52015-05-22 12:35:31 -050023
John Stultza4749bb2015-05-08 12:57:36 -070024#include "greybus.h"
John Stultza4749bb2015-05-08 12:57:36 -070025#include "audio.h"
26
/*
 * Timer/workqueue logic for pushing PCM data.
 *
 * While playing audio we get no status or feedback from the codec,
 * so we use an hrtimer to trigger sending data to the remote codec.
 * However, since the hrtimer runs in irq context, we have to
 * schedule a workqueue item to actually send the greybus data.
 */
37
38static void gb_pcm_work(struct work_struct *work)
39{
40 struct gb_snd *snd_dev = container_of(work, struct gb_snd, work);
41 struct snd_pcm_substream *substream = snd_dev->substream;
42 struct snd_pcm_runtime *runtime = substream->runtime;
43 unsigned int stride, frames, oldptr;
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070044 int period_elapsed, ret;
John Stultza4749bb2015-05-08 12:57:36 -070045 char *address;
46 long len;
47
48 if (!snd_dev)
49 return;
50
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070051 if (!atomic_read(&snd_dev->running)) {
52 if (snd_dev->cport_active) {
53 ret = gb_i2s_mgmt_deactivate_cport(
Mark A. Greer48229e52015-05-21 15:57:02 -070054 snd_dev->mgmt_connection,
55 snd_dev->i2s_tx_connection->bundle_cport_id);
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070056 if (ret) /* XXX Do what else with failure? */
57 pr_err("deactivate_cport failed: %d\n", ret);
58
59 snd_dev->cport_active = false;
60 }
61
John Stultza4749bb2015-05-08 12:57:36 -070062 return;
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070063 } else if (!snd_dev->cport_active) {
64 ret = gb_i2s_mgmt_activate_cport(snd_dev->mgmt_connection,
Mark A. Greer48229e52015-05-21 15:57:02 -070065 snd_dev->i2s_tx_connection->bundle_cport_id);
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070066 if (ret)
67 pr_err("activate_cport failed: %d\n", ret);
68
69 snd_dev->cport_active = true;
70 }
John Stultza4749bb2015-05-08 12:57:36 -070071
72 address = runtime->dma_area + snd_dev->hwptr_done;
73
74 len = frames_to_bytes(runtime,
75 runtime->buffer_size) - snd_dev->hwptr_done;
76 len = min(len, MAX_SEND_DATA_LEN);
77 gb_i2s_send_data(snd_dev->i2s_tx_connection, snd_dev->send_data_req_buf,
78 address, len, snd_dev->send_data_sample_count);
79
80 snd_dev->send_data_sample_count += CONFIG_SAMPLES_PER_MSG;
81
82 stride = runtime->frame_bits >> 3;
83 frames = len/stride;
84
85 snd_pcm_stream_lock(substream);
86 oldptr = snd_dev->hwptr_done;
87 snd_dev->hwptr_done += len;
88 if (snd_dev->hwptr_done >= runtime->buffer_size * stride)
89 snd_dev->hwptr_done -= runtime->buffer_size * stride;
90
91 frames = (len + (oldptr % stride)) / stride;
92
93 snd_dev->transfer_done += frames;
94 if (snd_dev->transfer_done >= runtime->period_size) {
95 snd_dev->transfer_done -= runtime->period_size;
96 period_elapsed = 1;
97 }
98
99 snd_pcm_stream_unlock(substream);
100 if (period_elapsed)
101 snd_pcm_period_elapsed(snd_dev->substream);
102}
103
104static enum hrtimer_restart gb_pcm_timer_function(struct hrtimer *hrtimer)
105{
106 struct gb_snd *snd_dev = container_of(hrtimer, struct gb_snd, timer);
107
108 if (!atomic_read(&snd_dev->running))
109 return HRTIMER_NORESTART;
110 queue_work(snd_dev->workqueue, &snd_dev->work);
111 hrtimer_forward_now(hrtimer, ns_to_ktime(CONFIG_PERIOD_NS));
112 return HRTIMER_RESTART;
113}
114
/*
 * Begin periodic PCM transmission for @snd_dev.
 *
 * Order matters here: the running flag must be set before the work is
 * queued, because gb_pcm_work() uses it to decide between activating
 * and deactivating the CPort.
 */
void gb_pcm_hrtimer_start(struct gb_snd *snd_dev)
{
	atomic_set(&snd_dev->running, 1);
	queue_work(snd_dev->workqueue, &snd_dev->work); /* Activates CPort */
	hrtimer_start(&snd_dev->timer, ns_to_ktime(CONFIG_PERIOD_NS),
				HRTIMER_MODE_REL);
}
122
/*
 * Stop periodic PCM transmission for @snd_dev.
 *
 * Clearing the running flag first makes the timer callback return
 * HRTIMER_NORESTART; the final queued work sees running == 0 and
 * deactivates the CPort in gb_pcm_work().
 */
void gb_pcm_hrtimer_stop(struct gb_snd *snd_dev)
{
	atomic_set(&snd_dev->running, 0);
	hrtimer_cancel(&snd_dev->timer);
	queue_work(snd_dev->workqueue, &snd_dev->work); /* Deactivates CPort */
}
129
130static int gb_pcm_hrtimer_init(struct gb_snd *snd_dev)
131{
132 hrtimer_init(&snd_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
133 snd_dev->timer.function = gb_pcm_timer_function;
134 atomic_set(&snd_dev->running, 0);
135 snd_dev->workqueue = alloc_workqueue("gb-audio", WQ_HIGHPRI, 0);
136 if (!snd_dev->workqueue)
137 return -ENOMEM;
138 INIT_WORK(&snd_dev->work, gb_pcm_work);
139 return 0;
140}
141
142
143/*
144 * Core gb pcm structure
145 */
/* Hardware capabilities advertised to ALSA for this platform. */
static struct snd_pcm_hardware gb_plat_pcm_hardware = {
	.info =			SNDRV_PCM_INFO_INTERLEAVED |
				SNDRV_PCM_INFO_MMAP |
				SNDRV_PCM_INFO_MMAP_VALID,
	.formats =		GB_FMTS,
	.rates =		GB_RATES,
	.rate_min =		8000,
	.rate_max =		GB_SAMPLE_RATE,
	.channels_min =		1,
	.channels_max =		2,
	/* XXX - All the values below are junk */
	.buffer_bytes_max =	64 * 1024,
	.period_bytes_min =	32,
	.period_bytes_max =	8192,
	.periods_min =		2,
	.periods_max =		32,
};
163
164static snd_pcm_uframes_t gb_pcm_pointer(struct snd_pcm_substream *substream)
165{
166 struct snd_soc_pcm_runtime *rtd = substream->private_data;
167 struct gb_snd *snd_dev;
168
169 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
170
171 return snd_dev->hwptr_done / (substream->runtime->frame_bits >> 3);
172}
173
174static int gb_pcm_prepare(struct snd_pcm_substream *substream)
175{
176 struct snd_soc_pcm_runtime *rtd = substream->private_data;
177 struct gb_snd *snd_dev;
178
179 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
180 snd_dev->hwptr_done = 0;
181 snd_dev->transfer_done = 0;
182 return 0;
183}
184
/* The remote codec supports exactly one sample rate. */
static unsigned int rates[] = {GB_SAMPLE_RATE};
static struct snd_pcm_hw_constraint_list constraints_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
	.mask = 0,
};
191
192static int gb_pcm_open(struct snd_pcm_substream *substream)
193{
194 struct snd_pcm_runtime *runtime = substream->runtime;
195 struct snd_soc_pcm_runtime *rtd = substream->private_data;
196 struct gb_snd *snd_dev;
197 unsigned long flags;
198 int ret;
199
200 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
201
202 spin_lock_irqsave(&snd_dev->lock, flags);
203 runtime->private_data = snd_dev;
204 snd_dev->substream = substream;
205 ret = gb_pcm_hrtimer_init(snd_dev);
206 spin_unlock_irqrestore(&snd_dev->lock, flags);
207
208 if (ret)
209 return ret;
210
211 snd_soc_set_runtime_hwparams(substream, &gb_plat_pcm_hardware);
212
213 ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
214 SNDRV_PCM_HW_PARAM_RATE,
215 &constraints_rates);
216 if (ret < 0)
217 return ret;
218
219 return snd_pcm_hw_constraint_integer(runtime,
220 SNDRV_PCM_HW_PARAM_PERIODS);
221}
222
/*
 * Close a PCM substream: drop the runtime's back-reference.
 *
 * NOTE(review): the workqueue allocated in gb_pcm_open() (via
 * gb_pcm_hrtimer_init()) is never destroyed here, so repeated
 * open/close cycles appear to leak workqueues — confirm against the
 * rest of the driver.
 */
static int gb_pcm_close(struct snd_pcm_substream *substream)
{
	substream->runtime->private_data = NULL;
	return 0;
}
228
229static int gb_pcm_hw_params(struct snd_pcm_substream *substream,
230 struct snd_pcm_hw_params *hw_params)
231{
Mark A. Greer6b340992015-05-21 15:57:03 -0700232 struct snd_soc_pcm_runtime *rtd = substream->private_data;
233 struct gb_snd *snd_dev;
234 int rate, chans, bytes_per_chan, is_le, ret;
235
236 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
237
238 rate = params_rate(hw_params);
239 chans = params_channels(hw_params);
240 bytes_per_chan = snd_pcm_format_width(params_format(hw_params)) / 8;
241 is_le = snd_pcm_format_little_endian(params_format(hw_params));
242
243 ret = gb_i2s_mgmt_set_cfg(snd_dev, rate, chans, bytes_per_chan, is_le);
244 if (ret)
245 return ret;
246
John Stultza4749bb2015-05-08 12:57:36 -0700247 return snd_pcm_lib_malloc_pages(substream,
248 params_buffer_bytes(hw_params));
249}
250
/* Release the DMA buffer allocated in gb_pcm_hw_params(). */
static int gb_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}
255
/* PCM operations exposed to the ASoC platform layer. */
static struct snd_pcm_ops gb_pcm_ops = {
	.open		= gb_pcm_open,
	.close		= gb_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= gb_pcm_hw_params,
	.hw_free	= gb_pcm_hw_free,
	.prepare	= gb_pcm_prepare,
	.pointer	= gb_pcm_pointer,
};
265
/* Free the buffers preallocated in gb_pcm_new(). */
static void gb_pcm_free(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
270
271static int gb_pcm_new(struct snd_soc_pcm_runtime *rtd)
272{
273 struct snd_pcm *pcm = rtd->pcm;
274
275 return snd_pcm_lib_preallocate_pages_for_all(
276 pcm,
277 SNDRV_DMA_TYPE_CONTINUOUS,
278 snd_dma_continuous_data(GFP_KERNEL),
279 PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
280}
281
Greg Kroah-Hartman13956902015-05-11 14:09:09 -0700282static struct snd_soc_platform_driver gb_soc_platform = {
John Stultza4749bb2015-05-08 12:57:36 -0700283 .ops = &gb_pcm_ops,
284 .pcm_new = gb_pcm_new,
285 .pcm_free = gb_pcm_free,
286};
287
/* Register this device as an ASoC platform. */
static int gb_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &gb_soc_platform);
}
292
/* Unregister the ASoC platform; always succeeds. */
static int gb_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}
298
/* Platform driver registered elsewhere in the greybus audio module. */
struct platform_driver gb_audio_pcm_driver = {
	.driver = {
		.name = "gb-pcm-audio",
		.owner = THIS_MODULE,
	},
	.probe = gb_soc_platform_probe,
	.remove = gb_soc_platform_remove,
};