/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "pcm.h"

#define EP_FLAG_ACTIVATED	0
#define EP_FLAG_RUNNING		1

/*
 * snd_usb_endpoint is a model that abstracts everything related to a
 * USB endpoint and its streaming.
 *
 * There are functions to activate and deactivate the streaming URBs and
 * optional callbacks to let the pcm logic handle the actual content of the
 * packets for playback and record. Thus, the bus streaming and the audio
 * handlers are fully decoupled.
 *
 * There are two different types of endpoints in audio applications.
 *
 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
 * inbound and outbound traffic.
 *
 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
 * (3 or 4 bytes).
 *
 * Each endpoint has to be configured prior to being used by calling
 * snd_usb_endpoint_set_params().
 *
 * The model incorporates reference counting, so that multiple users
 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
 * only the first user will effectively start the URBs, and only the last
 * one to stop it will tear the URBs down again.
 */

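/*
 * Rough sketch of the expected calling sequence, for illustration only.
 * The real callers live in the PCM handling code outside of this file;
 * the variable names and the (omitted) error handling below are
 * assumptions, not actual driver code:
 *
 *	struct snd_usb_endpoint *ep =
 *		snd_usb_add_endpoint(chip, alts, ep_num,
 *				     SNDRV_PCM_STREAM_PLAYBACK,
 *				     SND_USB_ENDPOINT_TYPE_DATA);
 *
 *	if (ep && snd_usb_endpoint_set_params(ep, hw_params, fmt, sync_ep) == 0) {
 *		snd_usb_endpoint_start(ep, can_sleep);
 *		...
 *		snd_usb_endpoint_stop(ep, 0, 1, 1);
 *	}
 *
 * snd_usb_endpoint_free() is eventually called for each entry of
 * chip->ep_list when the card is torn down.
 */
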
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}
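
/*
 * Worked example (illustration only): for rate = 48000,
 * ((48000 << 13) + 62) / 125 = 3145728 = 48 << 16, i.e. 48.0 frames per
 * 1 ms full speed frame in Q16.16 format.
 */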

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}
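
/*
 * Worked example (illustration only): for rate = 48000,
 * ((48000 << 10) + 62) / 125 = 393216 = 6 << 16, i.e. 6.0 frames per
 * 125 us high speed microframe in Q16.16 format.
 */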

/*
 * release a urb's transfer buffer (if any) and the urb itself
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/**
 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
 *
 * @ep: The snd_usb_endpoint
 *
 * Determine whether an endpoint is driven by an implicit feedback
 * data endpoint source.
 */
int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep)
{
	return ep->sync_master &&
		ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA &&
		ep->type == SND_USB_ENDPOINT_TYPE_DATA &&
		usb_pipeout(ep->pipe);
}

/*
 * For streaming based on information derived from sync endpoints,
 * prepare_outbound_urb_sizes() will call next_packet_size() to
 * determine the number of samples to be sent in the next packet.
 *
 * For implicit feedback, next_packet_size() is unused.
 */
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
{
	unsigned long flags;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	spin_lock_irqsave(&ep->lock, flags);
	ep->phase = (ep->phase & 0xffff)
		+ (ep->freqm << ep->datainterval);
	ret = min(ep->phase >> 16, ep->maxframesize);
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}
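
/*
 * Worked example (illustration only): at 44.1 kHz on a full speed bus
 * (ep->datainterval = 0), ep->freqm is roughly 44.1 in Q16.16. The phase
 * accumulator above then yields 44 frames for nine packets out of ten and
 * 45 frames for the tenth, averaging 44.1 frames per 1 ms packet.
 */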

static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb_ctx->urb);
}

static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;

	if (ep->sync_slave)
		snd_usb_handle_sync_urb(ep->sync_slave, ep, urb);

	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb);
}

/*
 * Prepare a PLAYBACK urb for submission to the bus.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	int i;
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		if (ep->prepare_data_urb) {
			ep->prepare_data_urb(ep->data_subs, urb);
		} else {
			/* no data provider, so send silence */
			unsigned int offs = 0;
			for (i = 0; i < ctx->packets; ++i) {
				int counts = ctx->packet_size[i];
				urb->iso_frame_desc[i].offset = offs * ep->stride;
				urb->iso_frame_desc[i].length = counts * ep->stride;
				offs += counts;
			}

			urb->number_of_packets = ctx->packets;
			urb->transfer_buffer_length = offs * ep->stride;
			memset(urb->transfer_buffer, ep->silence_value,
			       offs * ep->stride);
		}
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}
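
/*
 * Worked example (illustration only): for a 48 kHz stream, ep->freqn is
 * 6 << 16 on a high speed bus and is sent above as the four little-endian
 * bytes 00 00 06 00 (16.16 frames/microframe). On a full speed bus it is
 * 48 << 16, and the three bytes 00 00 0c encode 48 << 14, i.e. 48.0
 * frames/frame in 10.14 format.
 */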

/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}

/*
 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs and in case there aren't any available
 * or there are no packets that have been prepared, this function does
 * nothing.
 *
 * The reason why the functionality of sending and preparing URBs is separated
 * is that host controllers don't guarantee the order in which they return
 * inbound and outbound packets to their submitters.
 *
 * This function is only used for implicit feedback endpoints. For endpoints
 * driven by dedicated sync endpoints, URBs are immediately re-submitted
 * from their completion handler.
 */
static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
{
	while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {

		unsigned long flags;
		struct snd_usb_packet_info *uninitialized_var(packet);
		struct snd_urb_ctx *ctx = NULL;
		struct urb *urb;
		int err, i;

		spin_lock_irqsave(&ep->lock, flags);
		if (ep->next_packet_read_pos != ep->next_packet_write_pos) {
			packet = ep->next_packet + ep->next_packet_read_pos;
			ep->next_packet_read_pos++;
			ep->next_packet_read_pos %= MAX_URBS;

			/* take URB out of FIFO */
			if (!list_empty(&ep->ready_playback_urbs))
				ctx = list_first_entry(&ep->ready_playback_urbs,
						       struct snd_urb_ctx, ready_list);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		if (ctx == NULL)
			return;

		list_del_init(&ctx->ready_list);
		urb = ctx->urb;

		/* copy over the length information */
		for (i = 0; i < packet->packets; i++)
			ctx->packet_size[i] = packet->packet_size[i];

		/* call the data handler to fill in playback data */
		prepare_outbound_urb(ep, ctx);

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0)
			snd_printk(KERN_ERR "Unable to submit urb #%d: %d (urb %p)\n",
				   ctx->index, err, ctx->urb);
		else
			set_bit(ctx->index, &ep->active_mask);
	}
}

/*
 * complete callback for urbs
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN ||	/* device disabled */
		     ep->chip->shutdown))		/* device disconnected */
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		if (snd_usb_endpoint_implict_feedback_sink(ep)) {
			unsigned long flags;

			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);

			goto exit_clear;
		}

		prepare_outbound_urb(ep, ctx);
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	snd_printk(KERN_ERR "cannot submit urb (err = %d)\n", err);
	//snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
}

/**
 * snd_usb_add_endpoint: Add an endpoint to a USB audio chip
 *
 * @chip: The chip
 * @alts: The USB host interface
 * @ep_num: The number of the endpoint to use
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created. Otherwise, a pointer to the previously
 * created instance is returned. In case of any error, NULL is returned.
 *
 * New endpoints will be added to chip->ep_list and must be freed by
 * calling snd_usb_endpoint_free().
 */
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
					      struct usb_host_interface *alts,
					      int ep_num, int direction, int type)
{
	struct list_head *p;
	struct snd_usb_endpoint *ep;
	int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;

	mutex_lock(&chip->mutex);

	list_for_each(p, &chip->ep_list) {
		ep = list_entry(p, struct snd_usb_endpoint, list);
		if (ep->ep_num == ep_num &&
		    ep->iface == alts->desc.bInterfaceNumber &&
		    ep->alt_idx == alts->desc.bAlternateSetting) {
			snd_printdd(KERN_DEBUG "Re-using EP %x in iface %d,%d @%p\n",
					ep_num, ep->iface, ep->alt_idx, ep);
			goto __exit_unlock;
		}
	}

	snd_printdd(KERN_DEBUG "Creating new %s %s endpoint #%x\n",
		    is_playback ? "playback" : "capture",
		    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
		    ep_num);

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto __exit_unlock;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	ep->iface = alts->desc.bInterfaceNumber;
	ep->alt_idx = alts->desc.bAlternateSetting;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep_num &= USB_ENDPOINT_NUMBER_MASK;

	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	if (type == SND_USB_ENDPOINT_TYPE_SYNC) {
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			ep->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
			ep->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			ep->syncinterval = 3;

		ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
	}
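
	/*
	 * Example (illustration only): a high speed sync endpoint that lacks
	 * a usable bRefresh value but reports bInterval = 4 ends up with
	 * ep->syncinterval = 3, i.e. one sync packet every 1 << 3 = 8
	 * microframes (once per millisecond); see sync_ep_set_params() below.
	 */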

	list_add_tail(&ep->list, &chip->ep_list);

__exit_unlock:
	mutex_unlock(&chip->mutex);

	return ep;
}

/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = 0; i < ep->nurbs; i++)
			if (test_bit(i, &ep->active_mask))
				alive++;

		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n",
			   alive, ep->ep_num);

	return 0;
}

/*
 * unlink active urbs.
 */
static int deactivate_urbs(struct snd_usb_endpoint *ep, int force, int can_sleep)
{
	unsigned int i;
	int async;

	if (!force && ep->chip->shutdown) /* to be sure... */
		return -EBADFD;

	async = !can_sleep && ep->chip->async_unlink;

	clear_bit(EP_FLAG_RUNNING, &ep->flags);

	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_read_pos = 0;
	ep->next_packet_write_pos = 0;

	if (!async && in_interrupt())
		return 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				if (async)
					usb_unlink_urb(u);
				else
					usb_kill_urb(u);
			}
		}
	}

	return 0;
}

/*
 * release an endpoint's urbs
 */
static void release_urbs(struct snd_usb_endpoint *ep, int force)
{
	int i;

	/* route incoming urbs to nirvana */
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	/* stop urbs */
	deactivate_urbs(ep, force, 1);
	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	if (ep->syncbuf)
		usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
				  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
}

/*
 * configure a data endpoint
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep,
			      struct snd_pcm_hw_params *hw_params,
			      struct audioformat *fmt,
			      struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, i, urb_packs, total_packs, packs_per_ms;
	int period_bytes = params_period_bytes(hw_params);
	int format = params_format(hw_params);
	int is_playback = usb_pipeout(ep->pipe);
	int frame_bits = snd_pcm_format_physical_width(params_format(hw_params)) *
			params_channels(hw_params);

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;
	ep->silence_value = format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;

	/* calculate max. frequency */
	if (ep->maxpacksize) {
		/* whatever fits into a max. size packet */
		maxsize = ep->maxpacksize;
		ep->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	} else {
		/* no max. packet size: just take 25% higher than nominal */
		ep->freqmax = ep->freqn + (ep->freqn >> 2);
		maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL)
		packs_per_ms = 8 >> ep->datainterval;
	else
		packs_per_ms = 1;

	if (is_playback && !snd_usb_endpoint_implict_feedback_sink(ep)) {
		urb_packs = max(ep->chip->nrpacks, 1);
		urb_packs = min(urb_packs, (unsigned int) MAX_PACKS);
	} else {
		urb_packs = 1;
	}

	urb_packs *= packs_per_ms;

	if (sync_ep && !snd_usb_endpoint_implict_feedback_sink(ep))
		urb_packs = min(urb_packs, 1U << sync_ep->syncinterval);

	/* decide how many packets to be used */
	if (is_playback && !snd_usb_endpoint_implict_feedback_sink(ep)) {
		unsigned int minsize, maxpacks;
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval))
			  * (frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);
		total_packs = (period_bytes + minsize - 1) / minsize;
		/* we need at least two URBs for queueing */
		if (total_packs < 2) {
			total_packs = 2;
		} else {
			/* and we don't want too long a queue either */
			maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
			total_packs = min(total_packs, maxpacks);
		}
	} else {
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		total_packs = MAX_URBS * urb_packs;
	}

	ep->nurbs = (total_packs + urb_packs - 1) / urb_packs;
	if (ep->nurbs > MAX_URBS) {
		/* too much... */
		ep->nurbs = MAX_URBS;
		total_packs = MAX_URBS * urb_packs;
	} else if (ep->nurbs < 2) {
		/* too little - we need at least two packets
		 * to ensure contiguous playback/capture
		 */
		ep->nurbs = 2;
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = (i + 1) * total_packs / ep->nurbs
			- i * total_packs / ep->nurbs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++;		/* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(ep->chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}

/*
 * configure a sync endpoint
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep,
			      struct snd_pcm_hw_params *hw_params,
			      struct audioformat *fmt)
{
	int i;

	ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_ISO_ASAP |
					 URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}

/**
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
 * @ep: the snd_usb_endpoint to configure
 * @hw_params: the hardware parameters
 * @fmt: the USB audio format information
 * @sync_ep: the sync endpoint to use, if any
 *
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running cannot be reconfigured.
 */
int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
				struct snd_pcm_hw_params *hw_params,
				struct audioformat *fmt,
				struct snd_usb_endpoint *sync_ep)
{
	int err;

	if (ep->use_count != 0) {
		snd_printk(KERN_WARNING "Unable to change format on ep #%x: already in use\n",
			   ep->ep_num);
		return -EBUSY;
	}

	/* release old buffers, if any */
	release_urbs(ep, 0);

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL)
		ep->freqn = get_usb_full_speed_rate(params_rate(hw_params));
	else
		ep->freqn = get_usb_high_speed_rate(params_rate(hw_params));

	/* calculate the frequency in 16.16 format */
	ep->freqm = ep->freqn;
	ep->freqshift = INT_MIN;

	ep->phase = 0;

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		err = data_ep_set_params(ep, hw_params, fmt, sync_ep);
		break;
	case SND_USB_ENDPOINT_TYPE_SYNC:
		err = sync_ep_set_params(ep, hw_params, fmt);
		break;
	default:
		err = -EINVAL;
	}

	snd_printdd(KERN_DEBUG "Setting params for ep #%x (type %d, %d urbs), ret=%d\n",
		   ep->ep_num, ep->type, ep->nurbs, err);

	return err;
}

/**
 * snd_usb_endpoint_start: start an snd_usb_endpoint
 *
 * @ep: the endpoint to start
 * @can_sleep: flag indicating whether the operation is executed in
 *		non-atomic context
 *
 * A call to this function will increment the use count of the endpoint.
 * In case it is not already running, the URBs for this endpoint will be
 * submitted. Otherwise, this function does nothing.
 *
 * Must be balanced to calls of snd_usb_endpoint_stop().
 *
 * Returns an error if the URB submission failed, 0 in all other cases.
 */
int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
{
	int err;
	unsigned int i;

	if (ep->chip->shutdown)
		return -EBADFD;

	/* already running? */
	if (++ep->use_count != 1)
		return 0;

	/* just to be sure */
	deactivate_urbs(ep, 0, can_sleep);
	if (can_sleep)
		wait_clear_urbs(ep);

	ep->active_mask = 0;
	ep->unlink_mask = 0;
	ep->phase = 0;

	/*
	 * If this endpoint has a data endpoint as implicit feedback source,
	 * don't start the urbs here. Instead, mark them all as available,
	 * wait for the record urbs to return and queue the playback urbs
	 * from that context.
	 */

	set_bit(EP_FLAG_RUNNING, &ep->flags);

	if (snd_usb_endpoint_implict_feedback_sink(ep)) {
		for (i = 0; i < ep->nurbs; i++) {
			struct snd_urb_ctx *ctx = ep->urb + i;
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
		}

		return 0;
	}

	for (i = 0; i < ep->nurbs; i++) {
		struct urb *urb = ep->urb[i].urb;

		if (snd_BUG_ON(!urb))
			goto __error;

		if (usb_pipeout(ep->pipe)) {
			prepare_outbound_urb(ep, urb->context);
		} else {
			prepare_inbound_urb(ep, urb->context);
		}

		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &ep->active_mask);
	}

	return 0;

__error:
	clear_bit(EP_FLAG_RUNNING, &ep->flags);
	ep->use_count--;
	deactivate_urbs(ep, 0, 0);
	return -EPIPE;
}

/**
 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
 *
 * @ep: the endpoint to stop (may be NULL)
 * @force: deactivate the URBs even in case the device is shutting down
 * @can_sleep: flag indicating whether the operation is executed in
 *		non-atomic context
 * @wait: if set, wait until all active URBs have been returned
 *
 * A call to this function will decrement the use count of the endpoint.
 * In case the last user has requested the endpoint stop, the URBs will
 * actually be deactivated.
 *
 * Must be balanced to calls of snd_usb_endpoint_start().
 */
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
			   int force, int can_sleep, int wait)
{
	if (!ep)
		return;

	if (snd_BUG_ON(ep->use_count == 0))
		return;

	if (--ep->use_count == 0) {
		deactivate_urbs(ep, force, can_sleep);
		ep->data_subs = NULL;
		ep->sync_slave = NULL;
		ep->retire_data_urb = NULL;
		ep->prepare_data_urb = NULL;

		if (wait)
			wait_clear_urbs(ep);
	}
}

/**
 * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
 *
 * @ep: the endpoint to deactivate
 *
 * This function will unlink all the endpoint's URBs and wait for them
 * to be released. If the endpoint is not in use anymore, its activation
 * flag is cleared as well.
 *
 * In case of any active users, only the URBs are deactivated.
 *
 * Returns -EINVAL if @ep is NULL, 0 in all other cases.
 */
int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
{
	if (!ep)
		return -EINVAL;

	deactivate_urbs(ep, 1, 1);
	wait_clear_urbs(ep);

	if (ep->use_count != 0)
		return 0;

	clear_bit(EP_FLAG_ACTIVATED, &ep->flags);

	return 0;
}

/**
 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
 *
 * @head: the list header of the endpoint to free
 *
 * This function does not care for the endpoint's use count but will tear
 * down all the streaming URBs immediately and free all resources.
 */
void snd_usb_endpoint_free(struct list_head *head)
{
	struct snd_usb_endpoint *ep;

	ep = list_entry(head, struct snd_usb_endpoint, list);
	release_urbs(ep, 1);
	kfree(ep);
}

/**
 * snd_usb_handle_sync_urb: parse a USB sync packet
 *
 * @ep: the endpoint to handle the packet
 * @sender: the sending endpoint
 * @urb: the received packet
 *
 * This function is called from the context of an endpoint that received
 * the packet and is used to let another endpoint object handle the payload.
 */
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
			     struct snd_usb_endpoint *sender,
			     const struct urb *urb)
{
	int shift;
	unsigned int f;
	unsigned long flags;

	snd_BUG_ON(ep == sender);

	/*
	 * In case the endpoint is operating in implicit feedback mode, prepare
	 * a new outbound URB that has the same layout as the received packet
	 * and add it to the list of pending urbs. queue_pending_output_urbs()
	 * will take care of them later.
	 */
	if (snd_usb_endpoint_implict_feedback_sink(ep) &&
	    ep->use_count != 0) {

		/* implicit feedback case */
		int i, bytes = 0;
		struct snd_urb_ctx *in_ctx;
		struct snd_usb_packet_info *out_packet;

		in_ctx = urb->context;

		/* Count overall packet size */
		for (i = 0; i < in_ctx->packets; i++)
			if (urb->iso_frame_desc[i].status == 0)
				bytes += urb->iso_frame_desc[i].actual_length;

		/*
		 * skip empty packets. At least M-Audio's Fast Track Ultra stops
		 * streaming once it receives a 0-byte OUT URB
		 */
		if (bytes == 0)
			return;

		spin_lock_irqsave(&ep->lock, flags);
		out_packet = ep->next_packet + ep->next_packet_write_pos;

		/*
		 * Iterate through the inbound packet and prepare the lengths
		 * for the output packet. The OUT packet we are about to send
		 * will have the same amount of payload bytes as the IN
		 * packet we just received.
		 */

		out_packet->packets = in_ctx->packets;
		for (i = 0; i < in_ctx->packets; i++) {
			if (urb->iso_frame_desc[i].status == 0)
				out_packet->packet_size[i] =
					urb->iso_frame_desc[i].actual_length / ep->stride;
			else
				out_packet->packet_size[i] = 0;
		}

		ep->next_packet_write_pos++;
		ep->next_packet_write_pos %= MAX_URBS;
		spin_unlock_irqrestore(&ep->lock, flags);
		queue_pending_output_urbs(ep);

		return;
	}

	/*
	 * process after playback sync complete
	 *
	 * Full speed devices report feedback values in 10.14 format as samples
	 * per frame, high speed devices in 16.16 format as samples per
	 * microframe.
	 *
	 * Because the Audio Class 1 spec was written before USB 2.0, many high
	 * speed devices use a wrong interpretation, some others use an
	 * entirely different format.
	 *
	 * Therefore, we cannot predict what format any particular device uses
	 * and must detect it automatically.
	 */
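	/*
	 * Worked example (hypothetical device, for illustration): at 48 kHz on
	 * a high speed bus ep->freqn is 6 << 16. A device that wrongly reports
	 * its feedback in 10.14 format sends 6 << 14, which is smaller than
	 * freqn - freqn / 4, so the detection below shifts the value left
	 * twice and stores ep->freqshift = 2 for all subsequent packets.
	 */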

	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;
	else
		f &= 0x0fffffff;

	if (f == 0)
		return;

	if (unlikely(ep->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value. This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
		 */
		shift = 0;
		while (f < ep->freqn - ep->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > ep->freqn + ep->freqn / 2) {
			f >>= 1;
			shift--;
		}
		ep->freqshift = shift;
	} else if (ep->freqshift >= 0)
		f <<= ep->freqshift;
	else
		f >>= -ep->freqshift;

	if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&ep->lock, flags);
		ep->freqm = f;
		spin_unlock_irqrestore(&ep->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		ep->freqshift = INT_MIN;
	}
}
