/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "pcm.h"
#include "quirks.h"

#define EP_FLAG_RUNNING		1
#define EP_FLAG_STOPPING	2

/*
 * snd_usb_endpoint is a model that abstracts everything related to a
 * USB endpoint and its streaming.
 *
 * There are functions to activate and deactivate the streaming URBs and
 * optional callbacks to let the pcm logic handle the actual content of the
 * packets for playback and record. Thus, the bus streaming and the audio
 * handlers are fully decoupled.
 *
 * There are two different types of endpoints in audio applications.
 *
 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
 * inbound and outbound traffic.
 *
 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
 * (3 or 4 bytes).
 *
 * Each endpoint has to be configured prior to being used by calling
 * snd_usb_endpoint_set_params().
 *
 * The model incorporates reference counting, so that multiple users
 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
 * only the first user will effectively start the URBs, and only the last
 * one to stop it will tear the URBs down again.
 */

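/*
 * Rough usage sketch (illustrative only; the actual call sequence is
 * driven by the PCM code and may differ in detail):
 *
 *	ep = snd_usb_add_endpoint(chip, alts, ep_num, direction, type);
 *	snd_usb_endpoint_set_params(ep, ...);
 *	snd_usb_endpoint_start(ep);
 *	...
 *	snd_usb_endpoint_stop(ep);
 *	snd_usb_endpoint_sync_pending_stop(ep);
 */
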
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}

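/*
 * Worked example (illustrative, not used by the code): for a nominal
 * 44.1 kHz stream, get_usb_full_speed_rate(44100) yields
 * ((44100 << 13) + 62) / 125 = 2890138 ~= 0x2c199a, i.e. 44.1 frames
 * per 1 ms frame in Q16.16, and get_usb_high_speed_rate(44100) yields
 * ((44100 << 10) + 62) / 125 = 361267 ~= 0x58333, i.e. 5.5125 frames
 * per 125 us microframe.
 */
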
/*
 * release a urb context and its transfer buffer
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/**
 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
 *
 * @ep: The snd_usb_endpoint
 *
 * Determine whether an endpoint is driven by an implicit feedback
 * data endpoint source.
 */
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
{
	return ep->sync_master &&
		ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA &&
		ep->type == SND_USB_ENDPOINT_TYPE_DATA &&
		usb_pipeout(ep->pipe);
}

/*
 * For streaming based on information derived from sync endpoints,
 * prepare_outbound_urb_sizes() will call next_packet_size() to
 * determine the number of samples to be sent in the next packet.
 *
 * For implicit feedback, next_packet_size() is unused.
 */
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
{
	unsigned long flags;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	spin_lock_irqsave(&ep->lock, flags);
	ep->phase = (ep->phase & 0xffff)
		+ (ep->freqm << ep->datainterval);
	ret = min(ep->phase >> 16, ep->maxframesize);
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}

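/*
 * Illustrative example (not part of the driver logic): with freqm at
 * 0x2c199a (44.1 frames per packet in Q16.16) and datainterval 0, the
 * fractional part of ep->phase grows by 0.1 per call, so the function
 * returns 44 for nine packets and 45 for the tenth, keeping the
 * long-term rate at 44.1 kHz.
 */
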
static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb_ctx->urb);
}

static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;

	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	if (ep->sync_slave)
		snd_usb_handle_sync_urb(ep->sync_slave, ep, urb);

	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb);
}

static void prepare_silent_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned int offs = 0;
	unsigned int extra = 0;
	__le32 packet_length;
	int i;

	/* For tx_length_quirk, put packet length at start of packet */
	if (ep->chip->tx_length_quirk)
		extra = sizeof(packet_length);

	for (i = 0; i < ctx->packets; ++i) {
		unsigned int offset;
		unsigned int length;
		int counts;

		if (ctx->packet_size[i])
			counts = ctx->packet_size[i];
		else
			counts = snd_usb_endpoint_next_packet_size(ep);

		length = counts * ep->stride; /* number of silent bytes */
		offset = offs * ep->stride + extra * i;
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = length + extra;
		if (extra) {
			packet_length = cpu_to_le32(length);
			memcpy(urb->transfer_buffer + offset,
			       &packet_length, sizeof(packet_length));
		}
		memset(urb->transfer_buffer + offset + extra,
		       ep->silence_value, length);
		offs += counts;
	}

	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra;
}

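/*
 * Note on the resulting layout (derived from the code above): for chips
 * with tx_length_quirk set, each isochronous packet starts with a 4-byte
 * little-endian length header followed by 'length' bytes of silence;
 * otherwise the packet carries silence only.
 */
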
/*
 * Prepare a PLAYBACK urb for submission to the bus.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		if (ep->prepare_data_urb) {
			ep->prepare_data_urb(ep->data_subs, urb);
		} else {
			/* no data provider, so send silence */
			prepare_silent_urb(ep, ctx);
		}
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}

/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}

/*
 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs and in case there aren't any available
 * or there are no packets that have been prepared, this function does
 * nothing.
 *
 * The reason why the functionality of sending and preparing URBs is separated
 * is that host controllers don't guarantee the order in which they return
 * inbound and outbound packets to their submitters.
 *
 * This function is only used for implicit feedback endpoints. For endpoints
 * driven by dedicated sync endpoints, URBs are immediately re-submitted
 * from their completion handler.
 */
static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
{
	while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {

		unsigned long flags;
		struct snd_usb_packet_info *uninitialized_var(packet);
		struct snd_urb_ctx *ctx = NULL;
		struct urb *urb;
		int err, i;

		spin_lock_irqsave(&ep->lock, flags);
		if (ep->next_packet_read_pos != ep->next_packet_write_pos) {
			packet = ep->next_packet + ep->next_packet_read_pos;
			ep->next_packet_read_pos++;
			ep->next_packet_read_pos %= MAX_URBS;

			/* take URB out of FIFO */
			if (!list_empty(&ep->ready_playback_urbs))
				ctx = list_first_entry(&ep->ready_playback_urbs,
					       struct snd_urb_ctx, ready_list);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		if (ctx == NULL)
			return;

		list_del_init(&ctx->ready_list);
		urb = ctx->urb;

		/* copy over the length information */
		for (i = 0; i < packet->packets; i++)
			ctx->packet_size[i] = packet->packet_size[i];

		/* call the data handler to fill in playback data */
		prepare_outbound_urb(ep, ctx);

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0)
			usb_audio_err(ep->chip,
				"Unable to submit urb #%d: %d (urb %pK)\n",
				ctx->index, err, ctx->urb);
		else
			set_bit(ctx->index, &ep->active_mask);
	}
}

/*
 * complete callback for urbs
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	struct snd_pcm_substream *substream;
	unsigned long flags;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN))	/* device disabled */
		goto exit_clear;
	/* device disconnected */
	if (unlikely(atomic_read(&ep->chip->shutdown)))
		goto exit_clear;

	if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);

			goto exit_clear;
		}

		prepare_outbound_urb(ep, ctx);
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err);
	if (ep->data_subs && ep->data_subs->pcm_substream) {
		substream = ep->data_subs->pcm_substream;
		snd_pcm_stop_xrun(substream);
	}

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
}

/**
 * snd_usb_add_endpoint: Add an endpoint to a USB audio chip
 *
 * @chip: The chip
 * @alts: The USB host interface
 * @ep_num: The number of the endpoint to use
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created. Otherwise, a pointer to the previously
 * created instance is returned. In case of any error, NULL is returned.
 *
 * New endpoints will be added to chip->ep_list and must be freed by
 * calling snd_usb_endpoint_free().
 *
 * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
 * bNumEndpoints > 1 beforehand.
 */
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
					      struct usb_host_interface *alts,
					      int ep_num, int direction, int type)
{
	struct snd_usb_endpoint *ep;
	int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;

	if (WARN_ON(!alts))
		return NULL;

	mutex_lock(&chip->mutex);

	list_for_each_entry(ep, &chip->ep_list, list) {
		if (ep->ep_num == ep_num &&
		    ep->iface == alts->desc.bInterfaceNumber &&
		    ep->altsetting == alts->desc.bAlternateSetting) {
			usb_audio_dbg(ep->chip,
				      "Re-using EP %x in iface %d,%d @%pK\n",
				      ep_num, ep->iface, ep->altsetting, ep);
			goto __exit_unlock;
		}
	}

	usb_audio_dbg(chip, "Creating new %s %s endpoint #%x\n",
		      is_playback ? "playback" : "capture",
		      type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
		      ep_num);

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto __exit_unlock;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	ep->iface = alts->desc.bInterfaceNumber;
	ep->altsetting = alts->desc.bAlternateSetting;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep_num &= USB_ENDPOINT_NUMBER_MASK;

	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	if (type == SND_USB_ENDPOINT_TYPE_SYNC) {
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			ep->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
			ep->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			ep->syncinterval = 3;

		ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
	}

	list_add_tail(&ep->list, &chip->ep_list);

__exit_unlock:
	mutex_unlock(&chip->mutex);

	return ep;
}

/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	int alive;

	do {
		alive = bitmap_weight(&ep->active_mask, ep->nurbs);
		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		usb_audio_err(ep->chip,
			"timeout: still %d active urbs on EP #%x\n",
			alive, ep->ep_num);
	clear_bit(EP_FLAG_STOPPING, &ep->flags);

	ep->data_subs = NULL;
	ep->sync_slave = NULL;
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	return 0;
}

/* sync the pending stop operation;
 * this function itself doesn't trigger the stop operation
 */
void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
{
	if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags))
		wait_clear_urbs(ep);
}

/*
 * unlink active urbs.
 */
static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force)
{
	unsigned int i;

	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
		return -EBADFD;

	clear_bit(EP_FLAG_RUNNING, &ep->flags);

	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_read_pos = 0;
	ep->next_packet_write_pos = 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				usb_unlink_urb(u);
			}
		}
	}

	return 0;
}

/*
 * release an endpoint's urbs
 */
static void release_urbs(struct snd_usb_endpoint *ep, int force)
{
	int i;

	/* route incoming urbs to nirvana */
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	/* stop urbs */
	deactivate_urbs(ep, force);
	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	if (ep->syncbuf)
		usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
				  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
}

/*
 * configure a data endpoint
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep,
			      snd_pcm_format_t pcm_format,
			      unsigned int channels,
			      unsigned int period_bytes,
			      unsigned int frames_per_period,
			      unsigned int periods_per_buffer,
			      struct audioformat *fmt,
			      struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs, i;
	int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;
	int tx_length_quirk = (ep->chip->tx_length_quirk &&
			       usb_pipeout(ep->pipe));

	if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += channels << 3;
	}

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;
	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;

	/* assume max. frequency is 25% higher than nominal */
	ep->freqmax = ep->freqn + (ep->freqn >> 2);
	/* Round up freqmax to nearest integer in order to calculate maximum
	 * packet size, which must represent a whole number of frames.
	 * This is accomplished by adding 0x0.ffff before converting the
	 * Q16.16 format into integer.
	 * In order to accurately calculate the maximum packet size when
	 * the data interval is more than 1 (i.e. ep->datainterval > 0),
	 * multiply by the data interval prior to rounding. For instance,
	 * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
	 * frames with a data interval of 1, but 11 (10.25) frames with a
	 * data interval of 2.
	 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
	 * maximum datainterval value of 3, at USB full speed, higher for
	 * USB high speed, noting that ep->freqmax is in units of
	 * frames per packet in Q16.16 format.)
	 */
	maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
			(frame_bits >> 3);
	if (tx_length_quirk)
		maxsize += sizeof(__le32); /* Space for length descriptor */
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		unsigned int data_maxsize = maxsize = ep->maxpacksize;

		if (tx_length_quirk)
			/* Need to remove the length descriptor to calc freq */
			data_maxsize -= sizeof(__le32);
		ep->freqmax = (data_maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
		max_packs_per_urb = min(max_packs_per_urb,
					1U << sync_ep->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) ||
			snd_usb_endpoint_implicit_feedback_sink(ep)) {

		urb_packs = packs_per_ms;
		/*
		 * Wireless devices can poll at a max rate of once per 4ms.
		 * For dataintervals less than 5, increase the packet count to
		 * allow the host controller to use bursting to fill in the
		 * gaps.
		 */
		if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
			int interval = ep->datainterval;
			while (interval < 5) {
				urb_packs <<= 1;
				++interval;
			}
		}
		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, urb_packs);
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		ep->nurbs = MAX_URBS;

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs. The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
				max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ep->max_urb_frames = DIV_ROUND_UP(frames_per_period,
					urbs_per_period);

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
				MAX_QUEUE * packs_per_ms / urb_packs);
		ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer);
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = urb_packs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++;		/* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(ep->chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}

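/*
 * Sizing example (illustrative only): for 16-bit stereo at 48 kHz on a
 * high-speed data endpoint with datainterval 0, frame_bits is 32, so
 * ep->freqn is 6.0 frames per microframe (0x60000 in Q16.16), freqmax
 * is 7.5 (0x78000), and maxsize rounds up to 8 frames * 4 bytes =
 * 32 bytes per packet, before any wMaxPacketSize clamping or
 * tx_length_quirk overhead is applied.
 */
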
/*
 * configure a sync endpoint
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep)
{
	int i;

	ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}

/**
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
 * @ep: the snd_usb_endpoint to configure
 * @pcm_format: the audio format.
 * @channels: the number of audio channels.
 * @period_bytes: the number of bytes in one alsa period.
 * @period_frames: the number of frames in one alsa period.
 * @buffer_periods: the number of periods in one alsa buffer.
 * @rate: the frame rate.
 * @fmt: the USB audio format information
 * @sync_ep: the sync endpoint to use, if any
 *
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running cannot be reconfigured.
 */
int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
				snd_pcm_format_t pcm_format,
				unsigned int channels,
				unsigned int period_bytes,
				unsigned int period_frames,
				unsigned int buffer_periods,
				unsigned int rate,
				struct audioformat *fmt,
				struct snd_usb_endpoint *sync_ep)
{
	int err;

	if (ep->use_count != 0) {
		usb_audio_warn(ep->chip,
			 "Unable to change format on ep #%x: already in use\n",
			 ep->ep_num);
		return -EBUSY;
	}

	/* release old buffers, if any */
	release_urbs(ep, 0);

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL)
		ep->freqn = get_usb_full_speed_rate(rate);
	else
		ep->freqn = get_usb_high_speed_rate(rate);

	/* calculate the frequency in 16.16 format */
	ep->freqm = ep->freqn;
	ep->freqshift = INT_MIN;

	ep->phase = 0;

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		err = data_ep_set_params(ep, pcm_format, channels,
					 period_bytes, period_frames,
					 buffer_periods, fmt, sync_ep);
		break;
	case SND_USB_ENDPOINT_TYPE_SYNC:
		err = sync_ep_set_params(ep);
		break;
	default:
		err = -EINVAL;
	}

	usb_audio_dbg(ep->chip,
		      "Setting params for ep #%x (type %d, %d urbs), ret=%d\n",
		      ep->ep_num, ep->type, ep->nurbs, err);

	return err;
}

/**
 * snd_usb_endpoint_start: start an snd_usb_endpoint
 *
 * @ep: the endpoint to start
 *
 * A call to this function will increment the use count of the endpoint.
 * In case it is not already running, the URBs for this endpoint will be
 * submitted. Otherwise, this function does nothing.
 *
 * Must be balanced to calls of snd_usb_endpoint_stop().
 *
 * Returns an error if the URB submission failed, 0 in all other cases.
 */
int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
{
	int err;
	unsigned int i;

	if (atomic_read(&ep->chip->shutdown))
		return -EBADFD;

	/* already running? */
	if (++ep->use_count != 1)
		return 0;

	/* just to be sure */
	deactivate_urbs(ep, false);

	ep->active_mask = 0;
	ep->unlink_mask = 0;
	ep->phase = 0;

	snd_usb_endpoint_start_quirk(ep);

	/*
	 * If this endpoint has a data endpoint as implicit feedback source,
	 * don't start the urbs here. Instead, mark them all as available,
	 * wait for the record urbs to return and queue the playback urbs
	 * from that context.
	 */

	set_bit(EP_FLAG_RUNNING, &ep->flags);

	if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
		for (i = 0; i < ep->nurbs; i++) {
			struct snd_urb_ctx *ctx = ep->urb + i;
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
		}

		return 0;
	}

	for (i = 0; i < ep->nurbs; i++) {
		struct urb *urb = ep->urb[i].urb;

		if (snd_BUG_ON(!urb))
			goto __error;

		if (usb_pipeout(ep->pipe)) {
			prepare_outbound_urb(ep, urb->context);
		} else {
			prepare_inbound_urb(ep, urb->context);
		}

		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			usb_audio_err(ep->chip,
				"cannot submit urb %d, error %d: %s\n",
				i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &ep->active_mask);
	}

	return 0;

__error:
	clear_bit(EP_FLAG_RUNNING, &ep->flags);
	ep->use_count--;
	deactivate_urbs(ep, false);
	return -EPIPE;
}

/**
 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
 *
 * @ep: the endpoint to stop (may be NULL)
 *
 * A call to this function will decrement the use count of the endpoint.
 * In case the last user has requested the endpoint stop, the URBs will
 * actually be deactivated.
 *
 * Must be balanced to calls of snd_usb_endpoint_start().
 *
 * The caller needs to synchronize the pending stop operation via
 * snd_usb_endpoint_sync_pending_stop().
 */
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
{
	if (!ep)
		return;

	if (snd_BUG_ON(ep->use_count == 0))
		return;

	if (--ep->use_count == 0) {
		deactivate_urbs(ep, false);
		set_bit(EP_FLAG_STOPPING, &ep->flags);
	}
}

/**
 * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
 *
 * @ep: the endpoint to deactivate
 *
 * If the endpoint is not currently in use, this function will
 * deactivate its associated URBs.
 *
 * In case of any active users, this function does nothing.
 */
void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
{
	if (!ep)
		return;

	if (ep->use_count != 0)
		return;

	deactivate_urbs(ep, true);
	wait_clear_urbs(ep);
}

/**
 * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
 *
 * @ep: the endpoint to release
 *
 * This function does not care for the endpoint's use count but will tear
 * down all the streaming URBs immediately.
 */
void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
{
	release_urbs(ep, 1);
}

/**
 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
 *
 * @ep: the endpoint to free
 *
 * This frees all resources of the given ep.
 */
void snd_usb_endpoint_free(struct snd_usb_endpoint *ep)
{
	kfree(ep);
}

/**
 * snd_usb_handle_sync_urb: parse a USB sync packet
 *
 * @ep: the endpoint to handle the packet
 * @sender: the sending endpoint
 * @urb: the received packet
 *
 * This function is called from the context of an endpoint that received
 * the packet and is used to let another endpoint object handle the payload.
 */
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
			     struct snd_usb_endpoint *sender,
			     const struct urb *urb)
{
	int shift;
	unsigned int f;
	unsigned long flags;

	snd_BUG_ON(ep == sender);

	/*
	 * In case the endpoint is operating in implicit feedback mode, prepare
	 * a new outbound URB that has the same layout as the received packet
	 * and add it to the list of pending urbs. queue_pending_output_urbs()
	 * will take care of them later.
	 */
	if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
	    ep->use_count != 0) {

		/* implicit feedback case */
		int i, bytes = 0;
		struct snd_urb_ctx *in_ctx;
		struct snd_usb_packet_info *out_packet;

		in_ctx = urb->context;

		/* Count overall packet size */
		for (i = 0; i < in_ctx->packets; i++)
			if (urb->iso_frame_desc[i].status == 0)
				bytes += urb->iso_frame_desc[i].actual_length;

		/*
		 * skip empty packets. At least M-Audio's Fast Track Ultra stops
		 * streaming once it receives a 0-byte OUT URB
		 */
		if (bytes == 0)
			return;

		spin_lock_irqsave(&ep->lock, flags);
		out_packet = ep->next_packet + ep->next_packet_write_pos;

		/*
		 * Iterate through the inbound packet and prepare the lengths
		 * for the output packet. The OUT packet we are about to send
		 * will have the same number of payload bytes per stride as the
		 * IN packet we just received. Since the actual size is scaled
		 * by the stride, use the sender stride to calculate the length
		 * in case the number of channels differ between the implicitly
		 * fed-back endpoint and the synchronizing endpoint.
		 */

		out_packet->packets = in_ctx->packets;
		for (i = 0; i < in_ctx->packets; i++) {
			if (urb->iso_frame_desc[i].status == 0)
				out_packet->packet_size[i] =
					urb->iso_frame_desc[i].actual_length / sender->stride;
			else
				out_packet->packet_size[i] = 0;
		}

		ep->next_packet_write_pos++;
		ep->next_packet_write_pos %= MAX_URBS;
		spin_unlock_irqrestore(&ep->lock, flags);
		queue_pending_output_urbs(ep);

		return;
	}

	/*
	 * process after playback sync complete
	 *
	 * Full speed devices report feedback values in 10.14 format as samples
	 * per frame, high speed devices in 16.16 format as samples per
	 * microframe.
	 *
	 * Because the Audio Class 1 spec was written before USB 2.0, many high
	 * speed devices use a wrong interpretation, some others use an
	 * entirely different format.
	 *
	 * Therefore, we cannot predict what format any particular device uses
	 * and must detect it automatically.
	 */

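	/*
	 * Worked example (illustrative): a 44.1 kHz full-speed device
	 * reports 44.1 samples per frame in 10.14 format, i.e. roughly
	 * 0x0b0666 sent in 3 bytes; ep->freqn holds 44.1 in 16.16 format
	 * (0x2c199a), so the autodetection below settles on freqshift = 2
	 * (0x0b0666 << 2 ~= 0x2c1998). A well-behaved high-speed device
	 * would instead report 5.5125 samples per microframe as 0x00058333
	 * in 4 bytes, giving freqshift = 0.
	 */
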
	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;
	else
		f &= 0x0fffffff;

	if (f == 0)
		return;

	if (unlikely(sender->tenor_fb_quirk)) {
		/*
		 * Devices based on Tenor 8802 chipsets (TEAC UD-H01
		 * and others) sometimes change the feedback value
		 * by +/- 0x1.0000.
		 */
		if (f < ep->freqn - 0x8000)
			f += 0xf000;
		else if (f > ep->freqn + 0x8000)
			f -= 0xf000;
	} else if (unlikely(ep->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value. This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
		 */
		shift = 0;
		while (f < ep->freqn - ep->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > ep->freqn + ep->freqn / 2) {
			f >>= 1;
			shift--;
		}
		ep->freqshift = shift;
	} else if (ep->freqshift >= 0)
		f <<= ep->freqshift;
	else
		f >>= -ep->freqshift;

	if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&ep->lock, flags);
		ep->freqm = f;
		spin_unlock_irqrestore(&ep->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		ep->freqshift = INT_MIN;
	}
}