blob: 21dc6422d7478351f29f2159411d1d5100b5796d [file] [log] [blame]
Daniel Macke5779992010-03-04 19:46:13 +01001/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 */
17
Daniel Mackc731bc92011-09-14 12:46:57 +020018#include <linux/gfp.h>
19#include <linux/init.h>
Takashi Iwai80c8a2a2012-01-09 11:37:20 +010020#include <linux/ratelimit.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020021#include <linux/usb.h>
22#include <linux/usb/audio.h>
Daniel Mack8fdff6a2012-04-12 13:51:11 +020023#include <linux/slab.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020024
25#include <sound/core.h>
26#include <sound/pcm.h>
Daniel Mack8fdff6a2012-04-12 13:51:11 +020027#include <sound/pcm_params.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020028
29#include "usbaudio.h"
30#include "helper.h"
31#include "card.h"
32#include "endpoint.h"
33#include "pcm.h"
Daniel Mack2b58fd52012-09-04 10:23:07 +020034#include "quirks.h"
Daniel Mackc731bc92011-09-14 12:46:57 +020035
Daniel Mack8fdff6a2012-04-12 13:51:11 +020036#define EP_FLAG_ACTIVATED 0
37#define EP_FLAG_RUNNING 1
Takashi Iwaif58161b2012-11-08 08:52:45 +010038#define EP_FLAG_STOPPING 2
Daniel Mack8fdff6a2012-04-12 13:51:11 +020039
Daniel Mackc731bc92011-09-14 12:46:57 +020040/*
Daniel Mack94c27212012-04-12 13:51:15 +020041 * snd_usb_endpoint is a model that abstracts everything related to an
42 * USB endpoint and its streaming.
43 *
44 * There are functions to activate and deactivate the streaming URBs and
Daniel Mack07a5e9d2012-04-24 19:31:24 +020045 * optional callbacks to let the pcm logic handle the actual content of the
Daniel Mack94c27212012-04-12 13:51:15 +020046 * packets for playback and record. Thus, the bus streaming and the audio
47 * handlers are fully decoupled.
48 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020049 * There are two different types of endpoints in audio applications.
Daniel Mack94c27212012-04-12 13:51:15 +020050 *
51 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
52 * inbound and outbound traffic.
53 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020054 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
55 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
56 * (3 or 4 bytes).
Daniel Mack94c27212012-04-12 13:51:15 +020057 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020058 * Each endpoint has to be configured prior to being used by calling
59 * snd_usb_endpoint_set_params().
Daniel Mack94c27212012-04-12 13:51:15 +020060 *
61 * The model incorporates a reference counting, so that multiple users
62 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
63 * only the first user will effectively start the URBs, and only the last
Daniel Mack07a5e9d2012-04-24 19:31:24 +020064 * one to stop it will tear the URBs down again.
Daniel Mack94c27212012-04-12 13:51:15 +020065 */
66
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	/* fs/1000 in Q16.16 is rate * 65536 / 1000 == rate * 8192 / 125;
	 * the +62 bias rounds to nearest before the truncating divide */
	unsigned int scaled = rate << 13;

	return (scaled + 62) / 125;
}
75
/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	/* fs/8000 in Q16.16 is rate * 65536 / 8000 == rate * 1024 / 125;
	 * the +62 bias rounds to nearest before the truncating divide */
	unsigned int scaled = rate << 10;

	return (scaled + 62) / 125;
}
84
85/*
Daniel Mackc731bc92011-09-14 12:46:57 +020086 * release a urb data
87 */
88static void release_urb_ctx(struct snd_urb_ctx *u)
89{
Daniel Mackd399ff92012-04-12 13:51:13 +020090 if (u->buffer_size)
91 usb_free_coherent(u->ep->chip->dev, u->buffer_size,
92 u->urb->transfer_buffer,
93 u->urb->transfer_dma);
94 usb_free_urb(u->urb);
95 u->urb = NULL;
Daniel Mackc731bc92011-09-14 12:46:57 +020096}
97
/*
 * map a USB submission error code to a human-readable string
 * (used for kernel log messages only)
 */
static const char *usb_error_string(int err)
{
	static const struct {
		int err;
		const char *msg;
	} tbl[] = {
		{ -ENODEV,	 "no device" },
		{ -ENOENT,	 "endpoint not enabled" },
		{ -EPIPE,	 "endpoint stalled" },
		{ -ENOSPC,	 "not enough bandwidth" },
		{ -ESHUTDOWN,	 "device disabled" },
		{ -EHOSTUNREACH, "device suspended" },
		{ -EINVAL,	 "internal error" },
		{ -EAGAIN,	 "internal error" },
		{ -EFBIG,	 "internal error" },
		{ -EMSGSIZE,	 "internal error" },
	};
	int i;

	for (i = 0; i < (int)(sizeof(tbl) / sizeof(tbl[0])); i++)
		if (tbl[i].err == err)
			return tbl[i].msg;

	return "unknown error";
}
122
Daniel Mack94c27212012-04-12 13:51:15 +0200123/**
124 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
125 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200126 * @ep: The snd_usb_endpoint
Daniel Mack94c27212012-04-12 13:51:15 +0200127 *
128 * Determine whether an endpoint is driven by an implicit feedback
129 * data endpoint source.
130 */
Eldad Zack98ae4722013-04-03 23:18:52 +0200131int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200132{
133 return ep->sync_master &&
134 ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA &&
135 ep->type == SND_USB_ENDPOINT_TYPE_DATA &&
136 usb_pipeout(ep->pipe);
137}
138
Daniel Mack94c27212012-04-12 13:51:15 +0200139/*
140 * For streaming based on information derived from sync endpoints,
141 * prepare_outbound_urb_sizes() will call next_packet_size() to
142 * determine the number of samples to be sent in the next packet.
143 *
144 * For implicit feedback, next_packet_size() is unused.
145 */
Daniel Mack245baf92012-08-30 18:52:30 +0200146int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200147{
148 unsigned long flags;
149 int ret;
150
151 if (ep->fill_max)
152 return ep->maxframesize;
153
154 spin_lock_irqsave(&ep->lock, flags);
155 ep->phase = (ep->phase & 0xffff)
156 + (ep->freqm << ep->datainterval);
157 ret = min(ep->phase >> 16, ep->maxframesize);
158 spin_unlock_irqrestore(&ep->lock, flags);
159
160 return ret;
161}
162
/*
 * Post-completion handling for an outbound (playback) urb: hand the
 * finished urb back to the PCM layer, if a retire callback is registered.
 */
static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb_ctx->urb);
}
169
/*
 * Post-completion handling for an inbound (capture or sync) urb.
 *
 * Quirk handling may have requested that the first packets after startup
 * be dropped (ep->skip_packets).  Otherwise the urb is forwarded to the
 * endpoint synced to this one (rate feedback) and/or to the PCM layer's
 * retire callback.
 */
static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;

	/* drop quirk-requested startup packets */
	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	if (ep->sync_slave)
		snd_usb_handle_sync_urb(ep->sync_slave, ep, urb);

	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb);
}
186
/*
 * Prepare a PLAYBACK urb for submission to the bus.
 *
 * For a data endpoint, the PCM layer's prepare_data_urb callback fills in
 * the payload; without a callback, silence frames are generated.  For a
 * sync endpoint, the nominal rate (ep->freqn) is encoded into the packet
 * in the layout the bus speed requires.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	int i;
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		if (ep->prepare_data_urb) {
			ep->prepare_data_urb(ep->data_subs, urb);
		} else {
			/* no data provider, so send silence */
			unsigned int offs = 0;
			for (i = 0; i < ctx->packets; ++i) {
				int counts;

				/* a pre-recorded packet size (implicit
				 * feedback) takes precedence over the
				 * rate-derived size */
				if (ctx->packet_size[i])
					counts = ctx->packet_size[i];
				else
					counts = snd_usb_endpoint_next_packet_size(ep);

				urb->iso_frame_desc[i].offset = offs * ep->stride;
				urb->iso_frame_desc[i].length = counts * ep->stride;
				offs += counts;
			}

			urb->number_of_packets = ctx->packets;
			urb->transfer_buffer_length = offs * ep->stride;
			memset(urb->transfer_buffer, ep->silence_value,
			       offs * ep->stride);
		}
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}
253
/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 *
 * Data endpoints get back-to-back iso packet descriptors, each sized to
 * the currently configured maximum (ep->curpacksize); sync endpoints read
 * a single 3- or 4-byte feedback value.
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}
284
Daniel Mack94c27212012-04-12 13:51:15 +0200285/*
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200286 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs and in case there aren't any available
288 * or there are no packets that have been prepared, this function does
289 * nothing.
290 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200291 * The reason why the functionality of sending and preparing URBs is separated
292 * is that host controllers don't guarantee the order in which they return
293 * inbound and outbound packets to their submitters.
Daniel Mack94c27212012-04-12 13:51:15 +0200294 *
295 * This function is only used for implicit feedback endpoints. For endpoints
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200296 * driven by dedicated sync endpoints, URBs are immediately re-submitted
297 * from their completion handler.
Daniel Mack94c27212012-04-12 13:51:15 +0200298 */
static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
{
	/* keep pairing pending packet-size records with idle playback urbs
	 * until either FIFO runs dry or the endpoint is stopped */
	while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {

		unsigned long flags;
		struct snd_usb_packet_info *uninitialized_var(packet);
		struct snd_urb_ctx *ctx = NULL;
		struct urb *urb;
		int err, i;

		/* under the lock, pop the next packet-size record and,
		 * if one exists, the oldest idle urb */
		spin_lock_irqsave(&ep->lock, flags);
		if (ep->next_packet_read_pos != ep->next_packet_write_pos) {
			packet = ep->next_packet + ep->next_packet_read_pos;
			ep->next_packet_read_pos++;
			ep->next_packet_read_pos %= MAX_URBS;

			/* take URB out of FIFO */
			if (!list_empty(&ep->ready_playback_urbs))
				ctx = list_first_entry(&ep->ready_playback_urbs,
				       struct snd_urb_ctx, ready_list);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		/* no packet info pending, or no urb available yet */
		if (ctx == NULL)
			return;

		list_del_init(&ctx->ready_list);
		urb = ctx->urb;

		/* copy over the length information */
		for (i = 0; i < packet->packets; i++)
			ctx->packet_size[i] = packet->packet_size[i];

		/* call the data handler to fill in playback data */
		prepare_outbound_urb(ep, ctx);

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0)
			snd_printk(KERN_ERR "Unable to submit urb #%d: %d (urb %p)\n",
				   ctx->index, err, ctx->urb);
		else
			set_bit(ctx->index, &ep->active_mask);
	}
}
343
/*
 * complete callback for urbs
 *
 * Retires the finished urb and, unless the endpoint was stopped or the
 * device is going away, immediately prepares and resubmits it.  Implicit
 * feedback sinks are the exception: their urbs are parked on
 * ep->ready_playback_urbs and resubmitted via queue_pending_output_urbs()
 * once the corresponding capture data has arrived.
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	int err;

	/* silently drop unlinked/disconnected urbs; just mark inactive */
	if (unlikely(urb->status == -ENOENT || /* unlinked */
		     urb->status == -ENODEV || /* device removed */
		     urb->status == -ECONNRESET || /* unlinked */
		     urb->status == -ESHUTDOWN || /* device disabled */
		     ep->chip->shutdown)) /* device disconnected */
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		/* implicit feedback sink: park the urb for later refill
		 * instead of resubmitting it from here */
		if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
			unsigned long flags;

			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);

			goto exit_clear;
		}

		prepare_outbound_urb(ep, ctx);
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	snd_printk(KERN_ERR "cannot submit urb (err = %d)\n", err);
	//snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
}
397
Daniel Mack94c27212012-04-12 13:51:15 +0200398/**
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200399 * snd_usb_add_endpoint: Add an endpoint to an USB audio chip
Daniel Mack94c27212012-04-12 13:51:15 +0200400 *
401 * @chip: The chip
402 * @alts: The USB host interface
403 * @ep_num: The number of the endpoint to use
404 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
405 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
406 *
407 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created. Otherwise, a pointer to the previously
409 * created instance is returned. In case of any error, NULL is returned.
410 *
411 * New endpoints will be added to chip->ep_list and must be freed by
412 * calling snd_usb_endpoint_free().
413 */
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
					      struct usb_host_interface *alts,
					      int ep_num, int direction, int type)
{
	struct snd_usb_endpoint *ep;
	int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;

	if (WARN_ON(!alts))
		return NULL;

	mutex_lock(&chip->mutex);

	/* re-use an existing endpoint matching the same interface and
	 * alternate setting, if one was created before */
	list_for_each_entry(ep, &chip->ep_list, list) {
		if (ep->ep_num == ep_num &&
		    ep->iface == alts->desc.bInterfaceNumber &&
		    ep->alt_idx == alts->desc.bAlternateSetting) {
			snd_printdd(KERN_DEBUG "Re-using EP %x in iface %d,%d @%p\n",
					ep_num, ep->iface, ep->alt_idx, ep);
			goto __exit_unlock;
		}
	}

	snd_printdd(KERN_DEBUG "Creating new %s %s endpoint #%x\n",
		    is_playback ? "playback" : "capture",
		    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
		    ep_num);

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto __exit_unlock;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	ep->iface = alts->desc.bInterfaceNumber;
	ep->alt_idx = alts->desc.bAlternateSetting;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	/* strip the direction bit; only the endpoint number goes into
	 * the pipe (direction is encoded by snd/rcv below) */
	ep_num &= USB_ENDPOINT_NUMBER_MASK;

	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	if (type == SND_USB_ENDPOINT_TYPE_SYNC) {
		/* derive the sync polling interval: prefer the audio-class
		 * bRefresh field, then fall back to bInterval or defaults */
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			ep->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
			ep->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			ep->syncinterval = 3;

		ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
	}

	list_add_tail(&ep->list, &chip->ep_list);

__exit_unlock:
	mutex_unlock(&chip->mutex);

	return ep;
}
482
/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	/* give in-flight urbs up to one second to drain */
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	int alive;

	do {
		/* count urbs still marked active in the bitmask */
		alive = bitmap_weight(&ep->active_mask, ep->nurbs);
		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n",
			   alive, ep->ep_num);
	/* the pending stop, if any, has now completed */
	clear_bit(EP_FLAG_STOPPING, &ep->flags);

	return 0;
}
506
Takashi Iwaif58161b2012-11-08 08:52:45 +0100507/* sync the pending stop operation;
508 * this function itself doesn't trigger the stop operation
509 */
510void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
511{
512 if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags))
513 wait_clear_urbs(ep);
514}
515
/*
 * unlink active urbs.
 */
static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force)
{
	unsigned int i;

	if (!force && ep->chip->shutdown) /* to be sure... */
		return -EBADFD;

	/* keep the completion handlers from resubmitting */
	clear_bit(EP_FLAG_RUNNING, &ep->flags);

	/* discard any queued-but-unsubmitted playback packet info */
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_read_pos = 0;
	ep->next_packet_write_pos = 0;

	/* asynchronously unlink each active urb exactly once
	 * (unlink_mask guards against double unlinks) */
	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				usb_unlink_urb(u);
			}
		}
	}

	return 0;
}
543
/*
 * release an endpoint's urbs
 */
static void release_urbs(struct snd_usb_endpoint *ep, int force)
{
	int i;

	/* route incoming urbs to nirvana */
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	/* stop urbs and wait until none are in flight any more */
	deactivate_urbs(ep, force);
	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	/* the sync feedback buffer is shared by all sync urbs and is
	 * therefore freed here rather than per-urb above */
	if (ep->syncbuf)
		usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
				  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
}
569
/*
 * configure a data endpoint: derive packet-size limits from the nominal
 * rate, pick URB/packet counts to fit the ALSA period and buffer sizes,
 * and allocate the urbs with their coherent transfer buffers.
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep,
			      snd_pcm_format_t pcm_format,
			      unsigned int channels,
			      unsigned int period_bytes,
			      unsigned int frames_per_period,
			      unsigned int periods_per_buffer,
			      struct audioformat *fmt,
			      struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs, i;
	int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;

	if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += channels << 3;
	}

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;
	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;

	/* assume max. frequency is 25% higher than nominal */
	ep->freqmax = ep->freqn + (ep->freqn >> 2);
	/* round the Q16.16 rate up to whole frames per packet interval */
	maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - ep->datainterval);
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		maxsize = ep->maxpacksize;
		ep->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	/* high/super speed: 8 microframes per ms; full speed: 1 frame */
	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	/* with a dedicated sync endpoint, cap an urb to one sync interval */
	if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
		max_packs_per_urb = min(max_packs_per_urb,
					1U << sync_ep->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) ||
			snd_usb_endpoint_implicit_feedback_sink(ep)) {

		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, packs_per_ms);
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		ep->nurbs = MAX_URBS;

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs. The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
				max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ep->max_urb_frames = DIV_ROUND_UP(frames_per_period,
					urbs_per_period);

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
				MAX_QUEUE * packs_per_ms / urb_packs);
		ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer);
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = urb_packs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++; /* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(ep->chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}
713
/*
 * configure a sync endpoint
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep,
			      struct audioformat *fmt)
{
	int i;

	/* one coherent buffer shared by all sync urbs, 4 bytes per urb */
	ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		/* each urb gets a 4-byte slice of the shared buffer;
		 * buffer_size stays 0 so release_urb_ctx() won't free it */
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}
754
Daniel Mack94c27212012-04-12 13:51:15 +0200755/**
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200756 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
Daniel Mack94c27212012-04-12 13:51:15 +0200757 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200758 * @ep: the snd_usb_endpoint to configure
 * @pcm_format: the audio format.
760 * @channels: the number of audio channels.
761 * @period_bytes: the number of bytes in one alsa period.
Alan Stern976b6c02013-09-24 15:51:58 -0400762 * @period_frames: the number of frames in one alsa period.
763 * @buffer_periods: the number of periods in one alsa buffer.
Dylan Reid35ec7aa22012-09-18 09:49:47 -0700764 * @rate: the frame rate.
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200765 * @fmt: the USB audio format information
766 * @sync_ep: the sync endpoint to use, if any
Daniel Mack94c27212012-04-12 13:51:15 +0200767 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200768 * Determine the number of URBs to be used on this endpoint.
Daniel Mack94c27212012-04-12 13:51:15 +0200769 * An endpoint must be configured before it can be started.
770 * An endpoint that is already running can not be reconfigured.
771 */
int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
				snd_pcm_format_t pcm_format,
				unsigned int channels,
				unsigned int period_bytes,
				unsigned int period_frames,
				unsigned int buffer_periods,
				unsigned int rate,
				struct audioformat *fmt,
				struct snd_usb_endpoint *sync_ep)
{
	int err;

	/* a running endpoint must not be reconfigured */
	if (ep->use_count != 0) {
		snd_printk(KERN_WARNING "Unable to change format on ep #%x: already in use\n",
			   ep->ep_num);
		return -EBUSY;
	}

	/* release old buffers, if any */
	release_urbs(ep, 0);

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	/* nominal rate in the bus' Q16.16 frames-per-(micro)frame format */
	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL)
		ep->freqn = get_usb_full_speed_rate(rate);
	else
		ep->freqn = get_usb_high_speed_rate(rate);

	/* calculate the frequency in 16.16 format */
	ep->freqm = ep->freqn;
	/* NOTE(review): INT_MIN appears to mark the shift as "not yet
	 * determined" for the sync feedback code elsewhere -- confirm */
	ep->freqshift = INT_MIN;

	ep->phase = 0;

	switch (ep->type) {
	case  SND_USB_ENDPOINT_TYPE_DATA:
		err = data_ep_set_params(ep, pcm_format, channels,
					 period_bytes, period_frames,
					 buffer_periods, fmt, sync_ep);
		break;
	case SND_USB_ENDPOINT_TYPE_SYNC:
		err = sync_ep_set_params(ep, fmt);
		break;
	default:
		err = -EINVAL;
	}

	snd_printdd(KERN_DEBUG "Setting params for ep #%x (type %d, %d urbs), ret=%d\n",
		    ep->ep_num, ep->type, ep->nurbs, err);

	return err;
}
826
Daniel Mack94c27212012-04-12 13:51:15 +0200827/**
828 * snd_usb_endpoint_start: start an snd_usb_endpoint
829 *
Daniel Mack015618b2012-08-29 13:17:05 +0200830 * @ep: the endpoint to start
831 * @can_sleep: flag indicating whether the operation is executed in
832 * non-atomic context
Daniel Mack94c27212012-04-12 13:51:15 +0200833 *
834 * A call to this function will increment the use count of the endpoint.
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200835 * In case it is not already running, the URBs for this endpoint will be
Daniel Mack94c27212012-04-12 13:51:15 +0200836 * submitted. Otherwise, this function does nothing.
837 *
838 * Must be balanced to calls of snd_usb_endpoint_stop().
839 *
840 * Returns an error if the URB submission failed, 0 in all other cases.
841 */
Takashi Iwaia9bb3622012-11-20 18:32:06 +0100842int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200843{
844 int err;
845 unsigned int i;
846
847 if (ep->chip->shutdown)
848 return -EBADFD;
849
850 /* already running? */
851 if (++ep->use_count != 1)
852 return 0;
853
Daniel Mack015618b2012-08-29 13:17:05 +0200854 /* just to be sure */
Takashi Iwaiccc16962012-11-21 08:22:52 +0100855 deactivate_urbs(ep, false);
Daniel Mack015618b2012-08-29 13:17:05 +0200856 if (can_sleep)
857 wait_clear_urbs(ep);
858
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200859 ep->active_mask = 0;
860 ep->unlink_mask = 0;
861 ep->phase = 0;
862
Daniel Mack2b58fd52012-09-04 10:23:07 +0200863 snd_usb_endpoint_start_quirk(ep);
864
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200865 /*
866 * If this endpoint has a data endpoint as implicit feedback source,
867 * don't start the urbs here. Instead, mark them all as available,
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200868 * wait for the record urbs to return and queue the playback urbs
869 * from that context.
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200870 */
871
872 set_bit(EP_FLAG_RUNNING, &ep->flags);
873
Eldad Zack98ae4722013-04-03 23:18:52 +0200874 if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200875 for (i = 0; i < ep->nurbs; i++) {
876 struct snd_urb_ctx *ctx = ep->urb + i;
877 list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
878 }
879
880 return 0;
881 }
882
883 for (i = 0; i < ep->nurbs; i++) {
884 struct urb *urb = ep->urb[i].urb;
885
886 if (snd_BUG_ON(!urb))
887 goto __error;
888
889 if (usb_pipeout(ep->pipe)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200890 prepare_outbound_urb(ep, urb->context);
891 } else {
892 prepare_inbound_urb(ep, urb->context);
893 }
894
895 err = usb_submit_urb(urb, GFP_ATOMIC);
896 if (err < 0) {
897 snd_printk(KERN_ERR "cannot submit urb %d, error %d: %s\n",
898 i, err, usb_error_string(err));
899 goto __error;
900 }
901 set_bit(i, &ep->active_mask);
902 }
903
904 return 0;
905
906__error:
907 clear_bit(EP_FLAG_RUNNING, &ep->flags);
908 ep->use_count--;
Takashi Iwaiccc16962012-11-21 08:22:52 +0100909 deactivate_urbs(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200910 return -EPIPE;
911}
912
Daniel Mack94c27212012-04-12 13:51:15 +0200913/**
914 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
915 *
916 * @ep: the endpoint to stop (may be NULL)
917 *
918 * A call to this function will decrement the use count of the endpoint.
919 * In case the last user has requested the endpoint stop, the URBs will
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200920 * actually be deactivated.
Daniel Mack94c27212012-04-12 13:51:15 +0200921 *
922 * Must be balanced to calls of snd_usb_endpoint_start().
Takashi Iwaib2eb9502012-11-21 08:30:48 +0100923 *
924 * The caller needs to synchronize the pending stop operation via
925 * snd_usb_endpoint_sync_pending_stop().
Daniel Mack94c27212012-04-12 13:51:15 +0200926 */
Takashi Iwaib2eb9502012-11-21 08:30:48 +0100927void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200928{
929 if (!ep)
930 return;
931
932 if (snd_BUG_ON(ep->use_count == 0))
933 return;
934
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200935 if (--ep->use_count == 0) {
Takashi Iwaiccc16962012-11-21 08:22:52 +0100936 deactivate_urbs(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200937 ep->data_subs = NULL;
938 ep->sync_slave = NULL;
939 ep->retire_data_urb = NULL;
940 ep->prepare_data_urb = NULL;
Takashi Iwaib2eb9502012-11-21 08:30:48 +0100941 set_bit(EP_FLAG_STOPPING, &ep->flags);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200942 }
943}
944
Daniel Mack94c27212012-04-12 13:51:15 +0200945/**
Daniel Mack94c27212012-04-12 13:51:15 +0200946 * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
947 *
948 * @ep: the endpoint to deactivate
949 *
950 * If the endpoint is not currently in use, this functions will select the
951 * alternate interface setting 0 for the interface of this endpoint.
952 *
953 * In case of any active users, this functions does nothing.
954 *
955 * Returns an error if usb_set_interface() failed, 0 in all other
956 * cases.
957 */
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200958int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
959{
960 if (!ep)
961 return -EINVAL;
962
Takashi Iwaiccc16962012-11-21 08:22:52 +0100963 deactivate_urbs(ep, true);
Daniel Mack68e67f42012-07-12 13:08:40 +0200964 wait_clear_urbs(ep);
965
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200966 if (ep->use_count != 0)
967 return 0;
968
Daniel Mack68e67f42012-07-12 13:08:40 +0200969 clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200970
Daniel Mack68e67f42012-07-12 13:08:40 +0200971 return 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200972}
973
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200974/**
975 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
Daniel Mack94c27212012-04-12 13:51:15 +0200976 *
977 * @ep: the list header of the endpoint to free
978 *
979 * This function does not care for the endpoint's use count but will tear
980 * down all the streaming URBs immediately and free all resources.
981 */
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200982void snd_usb_endpoint_free(struct list_head *head)
983{
984 struct snd_usb_endpoint *ep;
985
986 ep = list_entry(head, struct snd_usb_endpoint, list);
987 release_urbs(ep, 1);
988 kfree(ep);
989}
990
Daniel Mack94c27212012-04-12 13:51:15 +0200991/**
992 * snd_usb_handle_sync_urb: parse an USB sync packet
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200993 *
Daniel Mack94c27212012-04-12 13:51:15 +0200994 * @ep: the endpoint to handle the packet
995 * @sender: the sending endpoint
996 * @urb: the received packet
997 *
998 * This function is called from the context of an endpoint that received
999 * the packet and is used to let another endpoint object handle the payload.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001000 */
1001void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
1002 struct snd_usb_endpoint *sender,
1003 const struct urb *urb)
1004{
1005 int shift;
1006 unsigned int f;
1007 unsigned long flags;
1008
1009 snd_BUG_ON(ep == sender);
1010
Daniel Mack94c27212012-04-12 13:51:15 +02001011 /*
1012 * In case the endpoint is operating in implicit feedback mode, prepare
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001013 * a new outbound URB that has the same layout as the received packet
1014 * and add it to the list of pending urbs. queue_pending_output_urbs()
1015 * will take care of them later.
Daniel Mack94c27212012-04-12 13:51:15 +02001016 */
Eldad Zack98ae4722013-04-03 23:18:52 +02001017 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001018 ep->use_count != 0) {
1019
1020 /* implicit feedback case */
1021 int i, bytes = 0;
1022 struct snd_urb_ctx *in_ctx;
1023 struct snd_usb_packet_info *out_packet;
1024
1025 in_ctx = urb->context;
1026
1027 /* Count overall packet size */
1028 for (i = 0; i < in_ctx->packets; i++)
1029 if (urb->iso_frame_desc[i].status == 0)
1030 bytes += urb->iso_frame_desc[i].actual_length;
1031
1032 /*
1033 * skip empty packets. At least M-Audio's Fast Track Ultra stops
1034 * streaming once it received a 0-byte OUT URB
1035 */
1036 if (bytes == 0)
1037 return;
1038
1039 spin_lock_irqsave(&ep->lock, flags);
1040 out_packet = ep->next_packet + ep->next_packet_write_pos;
1041
1042 /*
1043 * Iterate through the inbound packet and prepare the lengths
1044 * for the output packet. The OUT packet we are about to send
Eldad Zack28acb122012-11-28 23:55:34 +01001045 * will have the same amount of payload bytes per stride as the
1046 * IN packet we just received. Since the actual size is scaled
1047 * by the stride, use the sender stride to calculate the length
1048 * in case the number of channels differ between the implicitly
1049 * fed-back endpoint and the synchronizing endpoint.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001050 */
1051
1052 out_packet->packets = in_ctx->packets;
1053 for (i = 0; i < in_ctx->packets; i++) {
1054 if (urb->iso_frame_desc[i].status == 0)
1055 out_packet->packet_size[i] =
Eldad Zack28acb122012-11-28 23:55:34 +01001056 urb->iso_frame_desc[i].actual_length / sender->stride;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001057 else
1058 out_packet->packet_size[i] = 0;
1059 }
1060
1061 ep->next_packet_write_pos++;
1062 ep->next_packet_write_pos %= MAX_URBS;
1063 spin_unlock_irqrestore(&ep->lock, flags);
1064 queue_pending_output_urbs(ep);
1065
1066 return;
1067 }
1068
Daniel Mack94c27212012-04-12 13:51:15 +02001069 /*
1070 * process after playback sync complete
1071 *
1072 * Full speed devices report feedback values in 10.14 format as samples
1073 * per frame, high speed devices in 16.16 format as samples per
1074 * microframe.
1075 *
1076 * Because the Audio Class 1 spec was written before USB 2.0, many high
1077 * speed devices use a wrong interpretation, some others use an
1078 * entirely different format.
1079 *
1080 * Therefore, we cannot predict what format any particular device uses
1081 * and must detect it automatically.
1082 */
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001083
1084 if (urb->iso_frame_desc[0].status != 0 ||
1085 urb->iso_frame_desc[0].actual_length < 3)
1086 return;
1087
1088 f = le32_to_cpup(urb->transfer_buffer);
1089 if (urb->iso_frame_desc[0].actual_length == 3)
1090 f &= 0x00ffffff;
1091 else
1092 f &= 0x0fffffff;
1093
1094 if (f == 0)
1095 return;
1096
1097 if (unlikely(ep->freqshift == INT_MIN)) {
1098 /*
1099 * The first time we see a feedback value, determine its format
1100 * by shifting it left or right until it matches the nominal
1101 * frequency value. This assumes that the feedback does not
1102 * differ from the nominal value more than +50% or -25%.
1103 */
1104 shift = 0;
1105 while (f < ep->freqn - ep->freqn / 4) {
1106 f <<= 1;
1107 shift++;
1108 }
1109 while (f > ep->freqn + ep->freqn / 2) {
1110 f >>= 1;
1111 shift--;
1112 }
1113 ep->freqshift = shift;
1114 } else if (ep->freqshift >= 0)
1115 f <<= ep->freqshift;
1116 else
1117 f >>= -ep->freqshift;
1118
1119 if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
1120 /*
1121 * If the frequency looks valid, set it.
1122 * This value is referred to in prepare_playback_urb().
1123 */
1124 spin_lock_irqsave(&ep->lock, flags);
1125 ep->freqm = f;
1126 spin_unlock_irqrestore(&ep->lock, flags);
1127 } else {
1128 /*
1129 * Out of range; maybe the shift value is wrong.
1130 * Reset it so that we autodetect again the next time.
1131 */
1132 ep->freqshift = INT_MIN;
1133 }
1134}
1135