blob: b32700841444166df94f1908f74bee68a905fd0c [file] [log] [blame]
John Stultza4749bb2015-05-08 12:57:36 -07001#include <linux/kernel.h>
2#include <linux/device.h>
3#include <linux/interrupt.h>
4#include <linux/module.h>
5#include <linux/platform_device.h>
6#include <linux/workqueue.h>
7#include <linux/i2c.h>
8#include <sound/core.h>
9#include <sound/pcm.h>
10#include <sound/pcm_params.h>
11#include <sound/soc.h>
12#include <sound/dmaengine_pcm.h>
13#include <sound/simple_card.h>
14#include "greybus.h"
15#include "gpbridge.h"
16#include "audio.h"
17
/*
 * timer/workqueue logic for pushing pcm data.
 *
 * Since we get no status or feedback from the codec while playing
 * audio, we have to use an hrtimer to trigger sending data to the
 * remote codec.  However, because the hrtimer runs in irq context,
 * we have to schedule a workqueue to actually send the greybus data.
 */
28
29static void gb_pcm_work(struct work_struct *work)
30{
31 struct gb_snd *snd_dev = container_of(work, struct gb_snd, work);
32 struct snd_pcm_substream *substream = snd_dev->substream;
33 struct snd_pcm_runtime *runtime = substream->runtime;
34 unsigned int stride, frames, oldptr;
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070035 int period_elapsed, ret;
John Stultza4749bb2015-05-08 12:57:36 -070036 char *address;
37 long len;
38
39 if (!snd_dev)
40 return;
41
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070042 if (!atomic_read(&snd_dev->running)) {
43 if (snd_dev->cport_active) {
44 ret = gb_i2s_mgmt_deactivate_cport(
Mark A. Greer48229e52015-05-21 15:57:02 -070045 snd_dev->mgmt_connection,
46 snd_dev->i2s_tx_connection->bundle_cport_id);
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070047 if (ret) /* XXX Do what else with failure? */
48 pr_err("deactivate_cport failed: %d\n", ret);
49
50 snd_dev->cport_active = false;
51 }
52
John Stultza4749bb2015-05-08 12:57:36 -070053 return;
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070054 } else if (!snd_dev->cport_active) {
55 ret = gb_i2s_mgmt_activate_cport(snd_dev->mgmt_connection,
Mark A. Greer48229e52015-05-21 15:57:02 -070056 snd_dev->i2s_tx_connection->bundle_cport_id);
Mark A. Greerf9a4fee2015-05-21 15:57:01 -070057 if (ret)
58 pr_err("activate_cport failed: %d\n", ret);
59
60 snd_dev->cport_active = true;
61 }
John Stultza4749bb2015-05-08 12:57:36 -070062
63 address = runtime->dma_area + snd_dev->hwptr_done;
64
65 len = frames_to_bytes(runtime,
66 runtime->buffer_size) - snd_dev->hwptr_done;
67 len = min(len, MAX_SEND_DATA_LEN);
68 gb_i2s_send_data(snd_dev->i2s_tx_connection, snd_dev->send_data_req_buf,
69 address, len, snd_dev->send_data_sample_count);
70
71 snd_dev->send_data_sample_count += CONFIG_SAMPLES_PER_MSG;
72
73 stride = runtime->frame_bits >> 3;
74 frames = len/stride;
75
76 snd_pcm_stream_lock(substream);
77 oldptr = snd_dev->hwptr_done;
78 snd_dev->hwptr_done += len;
79 if (snd_dev->hwptr_done >= runtime->buffer_size * stride)
80 snd_dev->hwptr_done -= runtime->buffer_size * stride;
81
82 frames = (len + (oldptr % stride)) / stride;
83
84 snd_dev->transfer_done += frames;
85 if (snd_dev->transfer_done >= runtime->period_size) {
86 snd_dev->transfer_done -= runtime->period_size;
87 period_elapsed = 1;
88 }
89
90 snd_pcm_stream_unlock(substream);
91 if (period_elapsed)
92 snd_pcm_period_elapsed(snd_dev->substream);
93}
94
95static enum hrtimer_restart gb_pcm_timer_function(struct hrtimer *hrtimer)
96{
97 struct gb_snd *snd_dev = container_of(hrtimer, struct gb_snd, timer);
98
99 if (!atomic_read(&snd_dev->running))
100 return HRTIMER_NORESTART;
101 queue_work(snd_dev->workqueue, &snd_dev->work);
102 hrtimer_forward_now(hrtimer, ns_to_ktime(CONFIG_PERIOD_NS));
103 return HRTIMER_RESTART;
104}
105
/*
 * Start streaming: mark the stream running, queue the work item once
 * immediately (that first pass activates the CPort — see gb_pcm_work),
 * then arm the periodic hrtimer that paces subsequent data pushes.
 * Note the order matters: running must be set before the work runs.
 */
void gb_pcm_hrtimer_start(struct gb_snd *snd_dev)
{
	atomic_set(&snd_dev->running, 1);
	queue_work(snd_dev->workqueue, &snd_dev->work); /* Activates CPort */
	hrtimer_start(&snd_dev->timer, ns_to_ktime(CONFIG_PERIOD_NS),
		      HRTIMER_MODE_REL);
}
113
/*
 * Stop streaming: clear the running flag, cancel the pacing timer, then
 * queue the work item one last time so it observes !running and
 * deactivates the CPort from process context (see gb_pcm_work).
 */
void gb_pcm_hrtimer_stop(struct gb_snd *snd_dev)
{
	atomic_set(&snd_dev->running, 0);
	hrtimer_cancel(&snd_dev->timer);
	queue_work(snd_dev->workqueue, &snd_dev->work); /* Deactivates CPort */
}
120
121static int gb_pcm_hrtimer_init(struct gb_snd *snd_dev)
122{
123 hrtimer_init(&snd_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
124 snd_dev->timer.function = gb_pcm_timer_function;
125 atomic_set(&snd_dev->running, 0);
126 snd_dev->workqueue = alloc_workqueue("gb-audio", WQ_HIGHPRI, 0);
127 if (!snd_dev->workqueue)
128 return -ENOMEM;
129 INIT_WORK(&snd_dev->work, gb_pcm_work);
130 return 0;
131}
132
133
134/*
135 * Core gb pcm structure
136 */
/* Capabilities advertised to ALSA for the greybus PCM stream. */
static struct snd_pcm_hardware gb_plat_pcm_hardware = {
	.info =			SNDRV_PCM_INFO_INTERLEAVED |
				SNDRV_PCM_INFO_MMAP |
				SNDRV_PCM_INFO_MMAP_VALID,
	.formats =		GB_FMTS,
	.rates =		GB_RATES,
	.rate_min =		8000,
	.rate_max =		GB_SAMPLE_RATE,
	.channels_min =		1,
	.channels_max =		2,
	/* XXX - All the values below are junk */
	.buffer_bytes_max =	64 * 1024,
	.period_bytes_min =	32,
	.period_bytes_max =	8192,
	.periods_min =		2,
	.periods_max =		32,
};
154
155static snd_pcm_uframes_t gb_pcm_pointer(struct snd_pcm_substream *substream)
156{
157 struct snd_soc_pcm_runtime *rtd = substream->private_data;
158 struct gb_snd *snd_dev;
159
160 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
161
162 return snd_dev->hwptr_done / (substream->runtime->frame_bits >> 3);
163}
164
165static int gb_pcm_prepare(struct snd_pcm_substream *substream)
166{
167 struct snd_soc_pcm_runtime *rtd = substream->private_data;
168 struct gb_snd *snd_dev;
169
170 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
171 snd_dev->hwptr_done = 0;
172 snd_dev->transfer_done = 0;
173 return 0;
174}
175
/* The single sample rate supported; enforced via the constraint below. */
static unsigned int rates[] = {GB_SAMPLE_RATE};
static struct snd_pcm_hw_constraint_list constraints_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
	.mask = 0,
};
182
183static int gb_pcm_open(struct snd_pcm_substream *substream)
184{
185 struct snd_pcm_runtime *runtime = substream->runtime;
186 struct snd_soc_pcm_runtime *rtd = substream->private_data;
187 struct gb_snd *snd_dev;
188 unsigned long flags;
189 int ret;
190
191 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
192
193 spin_lock_irqsave(&snd_dev->lock, flags);
194 runtime->private_data = snd_dev;
195 snd_dev->substream = substream;
196 ret = gb_pcm_hrtimer_init(snd_dev);
197 spin_unlock_irqrestore(&snd_dev->lock, flags);
198
199 if (ret)
200 return ret;
201
202 snd_soc_set_runtime_hwparams(substream, &gb_plat_pcm_hardware);
203
204 ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
205 SNDRV_PCM_HW_PARAM_RATE,
206 &constraints_rates);
207 if (ret < 0)
208 return ret;
209
210 return snd_pcm_hw_constraint_integer(runtime,
211 SNDRV_PCM_HW_PARAM_PERIODS);
212}
213
214static int gb_pcm_close(struct snd_pcm_substream *substream)
215{
216 substream->runtime->private_data = NULL;
217 return 0;
218}
219
220static int gb_pcm_hw_params(struct snd_pcm_substream *substream,
221 struct snd_pcm_hw_params *hw_params)
222{
Mark A. Greer6b340992015-05-21 15:57:03 -0700223 struct snd_soc_pcm_runtime *rtd = substream->private_data;
224 struct gb_snd *snd_dev;
225 int rate, chans, bytes_per_chan, is_le, ret;
226
227 snd_dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
228
229 rate = params_rate(hw_params);
230 chans = params_channels(hw_params);
231 bytes_per_chan = snd_pcm_format_width(params_format(hw_params)) / 8;
232 is_le = snd_pcm_format_little_endian(params_format(hw_params));
233
234 ret = gb_i2s_mgmt_set_cfg(snd_dev, rate, chans, bytes_per_chan, is_le);
235 if (ret)
236 return ret;
237
John Stultza4749bb2015-05-08 12:57:36 -0700238 return snd_pcm_lib_malloc_pages(substream,
239 params_buffer_bytes(hw_params));
240}
241
/* ALSA hw_free callback: release the buffer from gb_pcm_hw_params(). */
static int gb_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}
246
/* PCM operation table handed to ASoC via gb_soc_platform. */
static struct snd_pcm_ops gb_pcm_ops = {
	.open		= gb_pcm_open,
	.close		= gb_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= gb_pcm_hw_params,
	.hw_free	= gb_pcm_hw_free,
	.prepare	= gb_pcm_prepare,
	.pointer	= gb_pcm_pointer,
};
256
/* Release the buffers preallocated in gb_pcm_new(). */
static void gb_pcm_free(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
261
262static int gb_pcm_new(struct snd_soc_pcm_runtime *rtd)
263{
264 struct snd_pcm *pcm = rtd->pcm;
265
266 return snd_pcm_lib_preallocate_pages_for_all(
267 pcm,
268 SNDRV_DMA_TYPE_CONTINUOUS,
269 snd_dma_continuous_data(GFP_KERNEL),
270 PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
271}
272
/* ASoC platform driver description: PCM ops plus create/destroy hooks. */
static struct snd_soc_platform_driver gb_soc_platform = {
	.ops		= &gb_pcm_ops,
	.pcm_new	= gb_pcm_new,
	.pcm_free	= gb_pcm_free,
};
278
/* Register this device as an ASoC platform (PCM back end). */
static int gb_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &gb_soc_platform);
}
283
/* Undo gb_soc_platform_probe(). */
static int gb_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}
289
/* Platform driver registered elsewhere in the greybus audio module. */
struct platform_driver gb_audio_pcm_driver = {
	.driver = {
		.name = "gb-pcm-audio",
		.owner = THIS_MODULE,
	},
	.probe = gb_soc_platform_probe,
	.remove = gb_soc_platform_remove,
};