/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>

#include <sound/core.h>
#include <sound/pcm.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "urb.h"
#include "pcm.h"

/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}

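/*
 * For example, a 48 kHz stream gives 48 << 16 (48 frames per 1 ms frame)
 * from get_usb_full_speed_rate() and 6 << 16 (6 frames per 125 us
 * microframe) from get_usb_high_speed_rate(); both are rate * 65536 / 1000
 * (resp. / 8000) with the factors reduced to keep the intermediate value
 * within 32 bits, and +62 rounding the division.
 */
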
/*
 * unlink active urbs.
 */
static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
{
	struct snd_usb_audio *chip = subs->stream->chip;
	unsigned int i;
	int async;

	subs->running = 0;

	if (!force && subs->stream->chip->shutdown) /* to be sure... */
		return -EBADFD;

	async = !can_sleep && chip->async_unlink;

	if (!async && in_interrupt())
		return 0;

	for (i = 0; i < subs->nurbs; i++) {
		if (test_bit(i, &subs->active_mask)) {
			if (!test_and_set_bit(i, &subs->unlink_mask)) {
				struct urb *u = subs->dataurb[i].urb;
				if (async)
					usb_unlink_urb(u);
				else
					usb_kill_urb(u);
			}
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (test_bit(i+16, &subs->active_mask)) {
				if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
					struct urb *u = subs->syncurb[i].urb;
					if (async)
						usb_unlink_urb(u);
					else
						usb_kill_urb(u);
				}
			}
		}
	}
	return 0;
}


/*
 * release a urb context and its buffer
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->urb) {
		if (u->buffer_size)
			usb_free_coherent(u->subs->dev, u->buffer_size,
					  u->urb->transfer_buffer,
					  u->urb->transfer_dma);
		usb_free_urb(u->urb);
		u->urb = NULL;
	}
}

/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_substream *subs)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = 0; i < subs->nurbs; i++) {
			if (test_bit(i, &subs->active_mask))
				alive++;
		}
		if (subs->syncpipe) {
			for (i = 0; i < SYNC_URBS; i++) {
				if (test_bit(i + 16, &subs->active_mask))
					alive++;
			}
		}
		if (!alive)
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));
	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
	return 0;
}

/*
 * release a substream
 */
void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
{
	int i;

	/* stop urbs (to be sure) */
	deactivate_urbs(subs, force, 1);
	wait_clear_urbs(subs);

	for (i = 0; i < MAX_URBS; i++)
		release_urb_ctx(&subs->dataurb[i]);
	for (i = 0; i < SYNC_URBS; i++)
		release_urb_ctx(&subs->syncurb[i]);
	usb_free_coherent(subs->dev, SYNC_URBS * 4,
			  subs->syncbuf, subs->sync_dma);
	subs->syncbuf = NULL;
	subs->nurbs = 0;
}

/*
 * complete callback from data urb
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}


/*
 * complete callback from sync urb
 */
static void snd_complete_sync_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index + 16, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}


/*
 * initialize a substream for playback/capture
 */
int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
				unsigned int period_bytes,
				unsigned int rate,
				unsigned int frame_bits)
{
	unsigned int maxsize, i;
	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
	unsigned int urb_packs, total_packs, packs_per_ms;
	struct snd_usb_audio *chip = subs->stream->chip;

	/* calculate the frequency in 16.16 format */
	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
		subs->freqn = get_usb_full_speed_rate(rate);
	else
		subs->freqn = get_usb_high_speed_rate(rate);
	subs->freqm = subs->freqn;
	subs->freqshift = INT_MIN;
	/* calculate max. frequency */
	if (subs->maxpacksize) {
		/* whatever fits into a max. size packet */
		maxsize = subs->maxpacksize;
		subs->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - subs->datainterval);
	} else {
		/* no max. packet size: just take 25% higher than nominal */
		subs->freqmax = subs->freqn + (subs->freqn >> 2);
		maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - subs->datainterval);
	}
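	/*
	 * Example (assumed stream parameters): a 48 kHz, 16-bit stereo high
	 * speed stream with datainterval 0 and no wMaxPacketSize limit has
	 * freqn = 6 << 16, so freqmax = 7.5 << 16 and
	 * maxsize = ((freqmax + 0xffff) * 4) >> 16 = 34 bytes per packet.
	 */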
	subs->phase = 0;

	if (subs->fill_max)
		subs->curpacksize = subs->maxpacksize;
	else
		subs->curpacksize = maxsize;

	if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
		packs_per_ms = 8 >> subs->datainterval;
	else
		packs_per_ms = 1;

	if (is_playback) {
		urb_packs = max(chip->nrpacks, 1);
		urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
	} else
		urb_packs = 1;
	urb_packs *= packs_per_ms;
	if (subs->syncpipe)
		urb_packs = min(urb_packs, 1U << subs->syncinterval);

	/* decide how many packets to be used */
	if (is_playback) {
		unsigned int minsize, maxpacks;
		/* determine how small a packet can be */
		minsize = (subs->freqn >> (16 - subs->datainterval))
			  * (frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (subs->syncpipe)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);
		total_packs = (period_bytes + minsize - 1) / minsize;
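		/*
		 * Example (assumed 48 kHz, 16-bit stereo, high speed,
		 * datainterval 0): minsize is 6 * 4 = 24 bytes, or 21 with
		 * a sync endpoint, so a 4096 byte period needs 196 packets
		 * before the clamping below.
		 */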
		/* we need at least two URBs for queueing */
		if (total_packs < 2) {
			total_packs = 2;
		} else {
			/* and we don't want too long a queue either */
			maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
			total_packs = min(total_packs, maxpacks);
		}
	} else {
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		total_packs = MAX_URBS * urb_packs;
	}
	subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
	if (subs->nurbs > MAX_URBS) {
		/* too much... */
		subs->nurbs = MAX_URBS;
		total_packs = MAX_URBS * urb_packs;
	} else if (subs->nurbs < 2) {
		/* too little - we need at least two packets
		 * to ensure contiguous playback/capture
		 */
		subs->nurbs = 2;
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < subs->nurbs; i++) {
		struct snd_urb_ctx *u = &subs->dataurb[i];
		u->index = i;
		u->subs = subs;
		u->packets = (i + 1) * total_packs / subs->nurbs
			- i * total_packs / subs->nurbs;
		u->buffer_size = maxsize * u->packets;
		if (subs->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++;	/* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer =
			usb_alloc_coherent(subs->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = subs->datapipe;
		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << subs->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	if (subs->syncpipe) {
		/* allocate and initialize sync urbs */
		subs->syncbuf = usb_alloc_coherent(subs->dev, SYNC_URBS * 4,
						   GFP_KERNEL, &subs->sync_dma);
		if (!subs->syncbuf)
			goto out_of_memory;
		for (i = 0; i < SYNC_URBS; i++) {
			struct snd_urb_ctx *u = &subs->syncurb[i];
			u->index = i;
			u->subs = subs;
			u->packets = 1;
			u->urb = usb_alloc_urb(1, GFP_KERNEL);
			if (!u->urb)
				goto out_of_memory;
			u->urb->transfer_buffer = subs->syncbuf + i * 4;
			u->urb->transfer_dma = subs->sync_dma + i * 4;
			u->urb->transfer_buffer_length = 4;
			u->urb->pipe = subs->syncpipe;
			u->urb->transfer_flags = URB_ISO_ASAP |
						 URB_NO_TRANSFER_DMA_MAP;
			u->urb->number_of_packets = 1;
			u->urb->interval = 1 << subs->syncinterval;
			u->urb->context = u;
			u->urb->complete = snd_complete_sync_urb;
		}
	}
	return 0;

out_of_memory:
	snd_usb_release_substream_urbs(subs, 0);
	return -ENOMEM;
}

/*
 * prepare urb for full speed capture sync pipe
 *
 * fill the length and offset of each urb descriptor.
 * the fixed 10.14 frequency is passed through the pipe.
 */
static int prepare_capture_sync_urb(struct snd_usb_substream *subs,
				    struct snd_pcm_runtime *runtime,
				    struct urb *urb)
{
	unsigned char *cp = urb->transfer_buffer;
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->iso_frame_desc[0].length = 3;
	urb->iso_frame_desc[0].offset = 0;
	cp[0] = subs->freqn >> 2;
	cp[1] = subs->freqn >> 10;
	cp[2] = subs->freqn >> 18;
	return 0;
}
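
/*
 * Note that shifting the 16.16 freqn right by two bits yields the 10.14
 * value expected on a full speed sync endpoint; e.g. for 44.1 kHz,
 * freqn = 0x2c199a and the three bytes sent are 0x66, 0x06, 0x0b
 * (0x0b0666 = 44.1 in 10.14 format, little-endian).
 */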

/*
 * prepare urb for high speed capture sync pipe
 *
 * fill the length and offset of each urb descriptor.
 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
 */
static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned char *cp = urb->transfer_buffer;
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->iso_frame_desc[0].length = 4;
	urb->iso_frame_desc[0].offset = 0;
	cp[0] = subs->freqn;
	cp[1] = subs->freqn >> 8;
	cp[2] = subs->freqn >> 16;
	cp[3] = subs->freqn >> 24;
	return 0;
}

/*
 * process after capture sync complete
 * - nothing to do
 */
static int retire_capture_sync_urb(struct snd_usb_substream *subs,
				   struct snd_pcm_runtime *runtime,
				   struct urb *urb)
{
	return 0;
}

/*
 * prepare urb for capture data pipe
 *
 * fill the offset and length of each descriptor.
 *
 * we use a temporary buffer to write the captured data.
 * since the length of written data is determined by host, we cannot
 * write onto the pcm buffer directly... the data is thus copied
 * later at complete callback to the global buffer.
 */
static int prepare_capture_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	int i, offs;
	struct snd_urb_ctx *ctx = urb->context;

	offs = 0;
	urb->dev = ctx->subs->dev; /* we need to set this each time */
	for (i = 0; i < ctx->packets; i++) {
		urb->iso_frame_desc[i].offset = offs;
		urb->iso_frame_desc[i].length = subs->curpacksize;
		offs += subs->curpacksize;
	}
	urb->transfer_buffer_length = offs;
	urb->number_of_packets = ctx->packets;
	return 0;
}

/*
 * process after capture complete
 *
 * copy the data from each descriptor to the pcm buffer, and
 * update the current position.
 */
static int retire_capture_urb(struct snd_usb_substream *subs,
			      struct snd_pcm_runtime *runtime,
			      struct urb *urb)
{
	unsigned long flags;
	unsigned char *cp;
	int i;
	unsigned int stride, frames, bytes, oldptr;
	int period_elapsed = 0;

	stride = runtime->frame_bits >> 3;

	for (i = 0; i < urb->number_of_packets; i++) {
		cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
		if (urb->iso_frame_desc[i].status) {
			snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
			// continue;
		}
		bytes = urb->iso_frame_desc[i].actual_length;
		frames = bytes / stride;
		if (!subs->txfr_quirk)
			bytes = frames * stride;
		if (bytes % (runtime->sample_bits >> 3) != 0) {
#ifdef CONFIG_SND_DEBUG_VERBOSE
			int oldbytes = bytes;
#endif
			bytes = frames * stride;
			snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
				    oldbytes, bytes);
		}
		/* update the current pointer */
		spin_lock_irqsave(&subs->lock, flags);
		oldptr = subs->hwptr_done;
		subs->hwptr_done += bytes;
		if (subs->hwptr_done >= runtime->buffer_size * stride)
			subs->hwptr_done -= runtime->buffer_size * stride;
		frames = (bytes + (oldptr % stride)) / stride;
		subs->transfer_done += frames;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
		}
		spin_unlock_irqrestore(&subs->lock, flags);
		/* copy a data chunk */
		if (oldptr + bytes > runtime->buffer_size * stride) {
			unsigned int bytes1 =
				runtime->buffer_size * stride - oldptr;
			memcpy(runtime->dma_area + oldptr, cp, bytes1);
			memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
		} else {
			memcpy(runtime->dma_area + oldptr, cp, bytes);
		}
	}
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}

/*
 * Process after capture complete when paused. Nothing to do.
 */
static int retire_paused_capture_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	return 0;
}


/*
 * prepare urb for playback sync pipe
 *
 * set up the offset and length to receive the current frequency.
 */
static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;

527 urb->dev = ctx->subs->dev; /* we need to set this at each time */
	urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
	urb->iso_frame_desc[0].offset = 0;
	return 0;
}

/*
 * process after playback sync complete
 *
 * Full speed devices report feedback values in 10.14 format as samples per
 * frame, high speed devices in 16.16 format as samples per microframe.
 * Because the Audio Class 1 spec was written before USB 2.0, many high speed
 * devices use a wrong interpretation, some others use an entirely different
 * format. Therefore, we cannot predict what format any particular device uses
 * and must detect it automatically.
 */
static int retire_playback_sync_urb(struct snd_usb_substream *subs,
				    struct snd_pcm_runtime *runtime,
				    struct urb *urb)
{
	unsigned int f;
	int shift;
	unsigned long flags;

	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return 0;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;
	else
		f &= 0x0fffffff;
	if (f == 0)
		return 0;

	if (unlikely(subs->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value. This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
		 */
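		/*
		 * For example (illustrative, assuming a 48 kHz stream): a
		 * full speed device reporting 48 samples/frame in 10.14
		 * format sends f = 48 << 14 while freqn = 48 << 16, so f
		 * is shifted left twice and freqshift ends up as 2; a high
		 * speed device reporting 6 samples/microframe in 16.16
		 * format sends f = freqn and freqshift stays 0.
		 */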
		shift = 0;
		while (f < subs->freqn - subs->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > subs->freqn + subs->freqn / 2) {
			f >>= 1;
			shift--;
		}
		subs->freqshift = shift;
	}
	else if (subs->freqshift >= 0)
		f <<= subs->freqshift;
	else
		f >>= -subs->freqshift;

	if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&subs->lock, flags);
		subs->freqm = f;
		spin_unlock_irqrestore(&subs->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		subs->freqshift = INT_MIN;
	}

	return 0;
}

/* determine the number of frames in the next packet */
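/*
 * Example (44.1 kHz over full speed, datainterval 0): freqm is roughly
 * 44.1 in 16.16 format, so most packets carry 44 frames and the fraction
 * accumulated in ->phase makes roughly every tenth packet carry 45 frames,
 * keeping the long-term rate at 44.1 kHz.
 */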
static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
{
	if (subs->fill_max)
		return subs->maxframesize;
	else {
		subs->phase = (subs->phase & 0xffff)
			+ (subs->freqm << subs->datainterval);
		return min(subs->phase >> 16, subs->maxframesize);
	}
}

/*
 * Prepare urb for streaming before playback starts or when paused.
 *
 * We don't have any data, so we send silence.
 */
static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned int i, offs, counts;
	struct snd_urb_ctx *ctx = urb->context;
	int stride = runtime->frame_bits >> 3;

	offs = 0;
	urb->dev = ctx->subs->dev;
	for (i = 0; i < ctx->packets; ++i) {
		counts = snd_usb_audio_next_packet_size(subs);
		urb->iso_frame_desc[i].offset = offs * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		offs += counts;
	}
	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * stride;
	memset(urb->transfer_buffer,
	       runtime->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
	       offs * stride);
	return 0;
}

/*
 * prepare urb for playback data pipe
 *
 * Since a URB can handle only a single linear buffer, we must use double
 * buffering when the data to be transferred overflows the buffer boundary.
 * To avoid inconsistencies when updating hwptr_done, we use double buffering
 * for all URBs.
 */
static int prepare_playback_urb(struct snd_usb_substream *subs,
				struct snd_pcm_runtime *runtime,
				struct urb *urb)
{
	int i, stride;
	unsigned int counts, frames, bytes;
	unsigned long flags;
	int period_elapsed = 0;
	struct snd_urb_ctx *ctx = urb->context;

	stride = runtime->frame_bits >> 3;

	frames = 0;
	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->number_of_packets = 0;
	spin_lock_irqsave(&subs->lock, flags);
	for (i = 0; i < ctx->packets; i++) {
		counts = snd_usb_audio_next_packet_size(subs);
		/* set up descriptor */
		urb->iso_frame_desc[i].offset = frames * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		frames += counts;
		urb->number_of_packets++;
		subs->transfer_done += counts;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
			if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
				if (subs->transfer_done > 0) {
					/* FIXME: fill-max mode is not
					 * supported yet */
					frames -= subs->transfer_done;
					counts -= subs->transfer_done;
					urb->iso_frame_desc[i].length =
						counts * stride;
					subs->transfer_done = 0;
				}
				i++;
				if (i < ctx->packets) {
					/* add a transfer delimiter */
					urb->iso_frame_desc[i].offset =
						frames * stride;
					urb->iso_frame_desc[i].length = 0;
					urb->number_of_packets++;
				}
				break;
			}
		}
		if (period_elapsed) /* finish at the period boundary */
			break;
	}
	bytes = frames * stride;
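	/*
	 * Illustrative numbers: with a 64 KiB ring (buffer_size * stride ==
	 * 65536) and hwptr_done == 65000, a 1024 byte chunk is copied as
	 * bytes1 = 536 from the end of the ring plus 488 from its start.
	 */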
	if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
		/* err, the transferred area goes over buffer boundary. */
		unsigned int bytes1 =
			runtime->buffer_size * stride - subs->hwptr_done;
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done, bytes1);
		memcpy(urb->transfer_buffer + bytes1,
		       runtime->dma_area, bytes - bytes1);
	} else {
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done, bytes);
	}
	subs->hwptr_done += bytes;
	if (subs->hwptr_done >= runtime->buffer_size * stride)
		subs->hwptr_done -= runtime->buffer_size * stride;
	runtime->delay += frames;
	spin_unlock_irqrestore(&subs->lock, flags);
	urb->transfer_buffer_length = bytes;
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}

/*
 * process after playback data complete
 * - decrease the delay count again
 */
static int retire_playback_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	unsigned long flags;
	int stride = runtime->frame_bits >> 3;
	int processed = urb->transfer_buffer_length / stride;

	spin_lock_irqsave(&subs->lock, flags);
	if (processed > runtime->delay)
		runtime->delay = 0;
	else
		runtime->delay -= processed;
	spin_unlock_irqrestore(&subs->lock, flags);
	return 0;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/*
 * set up and start data/sync urbs
 */
static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
{
	unsigned int i;
	int err;

	if (subs->stream->chip->shutdown)
		return -EBADFD;

	for (i = 0; i < subs->nurbs; i++) {
		if (snd_BUG_ON(!subs->dataurb[i].urb))
			return -EINVAL;
		if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
			snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);
			goto __error;
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (snd_BUG_ON(!subs->syncurb[i].urb))
				return -EINVAL;
			if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
				snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);
				goto __error;
			}
		}
	}

	subs->active_mask = 0;
	subs->unlink_mask = 0;
	subs->running = 1;
	for (i = 0; i < subs->nurbs; i++) {
		err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit datapipe "
				   "for urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &subs->active_mask);
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
			if (err < 0) {
				snd_printk(KERN_ERR "cannot submit syncpipe "
					   "for urb %d, error %d: %s\n",
					   i, err, usb_error_string(err));
				goto __error;
			}
			set_bit(i + 16, &subs->active_mask);
		}
	}
	return 0;

 __error:
	// snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
	deactivate_urbs(subs, 0, 0);
	return -EPIPE;
}


/*
 */
static struct snd_urb_ops audio_urb_ops[2] = {
	{
		.prepare =	prepare_nodata_playback_urb,
		.retire =	retire_playback_urb,
		.prepare_sync =	prepare_playback_sync_urb,
		.retire_sync =	retire_playback_sync_urb,
	},
	{
		.prepare =	prepare_capture_urb,
		.retire =	retire_capture_urb,
		.prepare_sync =	prepare_capture_sync_urb,
		.retire_sync =	retire_capture_sync_urb,
	},
};

/*
 * initialize the substream instance.
 */

void snd_usb_init_substream(struct snd_usb_stream *as,
			    int stream, struct audioformat *fp)
{
	struct snd_usb_substream *subs = &as->substream[stream];

	INIT_LIST_HEAD(&subs->fmt_list);
	spin_lock_init(&subs->lock);

	subs->stream = as;
	subs->direction = stream;
	subs->dev = as->chip->dev;
	subs->txfr_quirk = as->chip->txfr_quirk;
	subs->ops = audio_urb_ops[stream];
	if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
		subs->ops.prepare_sync = prepare_capture_sync_urb_hs;

	snd_usb_set_pcm_ops(as->pcm, stream);

	list_add_tail(&fp->list, &subs->fmt_list);
	subs->formats |= fp->formats;
	subs->endpoint = fp->endpoint;
	subs->num_formats++;
	subs->fmt_type = fp->fmt_type;
}

int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		subs->ops.prepare = prepare_playback_urb;
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		return deactivate_urbs(subs, 0, 0);
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		subs->ops.prepare = prepare_nodata_playback_urb;
		return 0;
	}

	return -EINVAL;
}

int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		subs->ops.retire = retire_capture_urb;
		return start_urbs(subs, substream->runtime);
	case SNDRV_PCM_TRIGGER_STOP:
		return deactivate_urbs(subs, 0, 0);
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		subs->ops.retire = retire_paused_capture_urb;
		return 0;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		subs->ops.retire = retire_capture_urb;
		return 0;
	}

	return -EINVAL;
}

int snd_usb_substream_prepare(struct snd_usb_substream *subs,
			      struct snd_pcm_runtime *runtime)
{
	/* clear urbs (to be sure) */
	deactivate_urbs(subs, 0, 1);
	wait_clear_urbs(subs);

	/* for playback, submit the URBs now; otherwise, the first hwptr_done
	 * updates for all URBs would happen at the same time when starting */
	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		subs->ops.prepare = prepare_nodata_playback_urb;
		return start_urbs(subs, runtime);
	}

	return 0;
}