/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
24
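/*
 * Example (hypothetical numbers): with a 128-byte SDIO block size the
 * block_mask is 0x7f, so a 90-byte message is padded out to
 * (90 + 0x7f) & ~0x7f = 128 bytes, i.e. one full block.
 */
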
/* Functions for Tx credit handling */
static void ath6kl_deposit_credit_to_ep(struct ath6kl_htc_credit_info
                                        *cred_info,
                                        struct htc_endpoint_credit_dist
                                        *ep_dist, int credits)
{
        ep_dist->credits += credits;
        ep_dist->cred_assngd += credits;
        cred_info->cur_free_credits -= credits;
}
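
/*
 * Note: a deposit moves credits from the shared free pool to an
 * endpoint, and ath6kl_reduce_credits() moves them back, so
 * cur_free_credits always reflects total_avail_credits minus whatever
 * the endpoints currently hold.
 */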

static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
                               struct list_head *ep_list,
                               int tot_credits)
{
        struct htc_endpoint_credit_dist *cur_ep_dist;
        int count;

        cred_info->cur_free_credits = tot_credits;
        cred_info->total_avail_credits = tot_credits;

        list_for_each_entry(cur_ep_dist, ep_list, list) {
                if (cur_ep_dist->endpoint == ENDPOINT_0)
                        continue;

                cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

                if (tot_credits > 4) {
                        if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
                            (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
                                ath6kl_deposit_credit_to_ep(cred_info,
                                                            cur_ep_dist,
                                                            cur_ep_dist->cred_min);
                                cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
                        }
                }

                if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
                        ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
                                                    cur_ep_dist->cred_min);
                        /*
                         * Control service is always marked active, it
                         * never goes inactive EVER.
                         */
                        cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
                } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
                        /* this is the lowest priority data endpoint */
                        /* FIXME: this looks fishy, check */
                        cred_info->lowestpri_ep_dist = cur_ep_dist->list;

                /*
                 * Streams have to be created (explicit | implicit) for all
                 * kinds of traffic. BE endpoints are also inactive in the
                 * beginning. When BE traffic starts it creates implicit
                 * streams that redistribute credits.
                 *
                 * Note: all other endpoints have minimums set but are
                 * initially given NO credits. Credits will be distributed
                 * as traffic activity demands.
                 */
        }

        WARN_ON(cred_info->cur_free_credits <= 0);

        list_for_each_entry(cur_ep_dist, ep_list, list) {
                if (cur_ep_dist->endpoint == ENDPOINT_0)
                        continue;

                if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
                        cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
                else {
                        /*
                         * For the remaining data endpoints, we assume that
                         * each endpoint's cred_per_msg is the same. We use
                         * a simple calculation here: take the remaining
                         * credits, determine how many whole messages they
                         * can cover, and set each endpoint's normal value
                         * to 3/4 of that amount.
                         */
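                        /*
                         * Worked example (hypothetical numbers): with 26
                         * free credits and cred_per_msg = 6, count =
                         * (26 / 6) * 6 = 24, then (24 * 3) >> 2 = 18, so
                         * cred_norm = max(18, 6) = 18.
                         */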
                        count = (cred_info->cur_free_credits /
                                 cur_ep_dist->cred_per_msg)
                                * cur_ep_dist->cred_per_msg;
                        count = (count * 3) >> 2;
                        count = max(count, cur_ep_dist->cred_per_msg);
                        cur_ep_dist->cred_norm = count;
                }
        }
}

/* initialize and setup credit distribution */
int ath6kl_setup_credit_dist(void *htc_handle,
                             struct ath6kl_htc_credit_info *cred_info)
{
        u16 servicepriority[5];

        memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

        servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
        servicepriority[1] = WMI_DATA_VO_SVC;
        servicepriority[2] = WMI_DATA_VI_SVC;
        servicepriority[3] = WMI_DATA_BE_SVC;
        servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

        /* set priority list */
        ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);

        return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6kl_reduce_credits(struct ath6kl_htc_credit_info *cred_info,
                                  struct htc_endpoint_credit_dist *ep_dist,
                                  int limit)
{
        int credits;

        ep_dist->cred_assngd = limit;

        if (ep_dist->credits <= limit)
                return;

        credits = ep_dist->credits - limit;
        ep_dist->credits -= credits;
        cred_info->cur_free_credits += credits;
}

static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
                                 struct list_head *epdist_list)
{
        struct htc_endpoint_credit_dist *cur_dist_list;

        list_for_each_entry(cur_dist_list, epdist_list, list) {
                if (cur_dist_list->endpoint == ENDPOINT_0)
                        continue;

                if (cur_dist_list->cred_to_dist > 0) {
                        cur_dist_list->credits +=
                                cur_dist_list->cred_to_dist;
                        cur_dist_list->cred_to_dist = 0;
                        if (cur_dist_list->credits >
                            cur_dist_list->cred_assngd)
                                ath6kl_reduce_credits(cred_info,
                                                      cur_dist_list,
                                                      cur_dist_list->cred_assngd);

                        if (cur_dist_list->credits >
                            cur_dist_list->cred_norm)
                                ath6kl_reduce_credits(cred_info, cur_dist_list,
                                                      cur_dist_list->cred_norm);

                        if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
                                if (cur_dist_list->txq_depth == 0)
                                        ath6kl_reduce_credits(cred_info,
                                                              cur_dist_list, 0);
                        }
                }
        }
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
static void ath6kl_seek_credits(struct ath6kl_htc_credit_info *cred_info,
                                struct htc_endpoint_credit_dist *ep_dist)
{
        struct htc_endpoint_credit_dist *curdist_list;
        int credits = 0;
        int need;

        if (ep_dist->svc_id == WMI_CONTROL_SVC)
                goto out;

        if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
            (ep_dist->svc_id == WMI_DATA_VO_SVC))
                if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
                        goto out;

        /*
         * For all other services, we follow a simple algorithm of:
         *
         * 1. checking the free pool for credits
         * 2. checking lower priority endpoints for credits to take
         */

        credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

        if (credits >= ep_dist->seek_cred)
                goto out;

        /*
         * We don't have enough in the free pool, try taking away from
         * lower priority services. The rules for taking away credits:
         *
         * 1. Only take from lower priority endpoints.
         * 2. Only take what is allocated above the minimum (never
         *    starve an endpoint completely).
         * 3. Only take what you need.
         */

        list_for_each_entry_reverse(curdist_list,
                                    &cred_info->lowestpri_ep_dist,
                                    list) {
                if (curdist_list == ep_dist)
                        break;

                need = ep_dist->seek_cred - cred_info->cur_free_credits;

                if ((curdist_list->cred_assngd - need) >=
                    curdist_list->cred_min) {
                        /*
                         * The current one has been allocated more than
                         * its minimum and it has enough credits assigned
                         * above its minimum to fulfill our need. Try to
                         * take away just enough to fulfill our need.
                         */
                        ath6kl_reduce_credits(cred_info, curdist_list,
                                              curdist_list->cred_assngd - need);

                        if (cred_info->cur_free_credits >=
                            ep_dist->seek_cred)
                                break;
                }

                if (curdist_list->endpoint == ENDPOINT_0)
                        break;
        }

        credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
        /* did we find some credits? */
        if (credits)
                ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);

        ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_redistribute_credits(struct ath6kl_htc_credit_info *info,
                                        struct list_head *ep_dist_list)
{
        struct htc_endpoint_credit_dist *curdist_list;

        list_for_each_entry(curdist_list, ep_dist_list, list) {
                if (curdist_list->endpoint == ENDPOINT_0)
                        continue;

                if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
                    (curdist_list->svc_id == WMI_DATA_BE_SVC))
                        curdist_list->dist_flags |= HTC_EP_ACTIVE;

                if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
                    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
                        if (curdist_list->txq_depth == 0)
                                ath6kl_reduce_credits(info, curdist_list, 0);
                        else
                                ath6kl_reduce_credits(info,
                                                      curdist_list,
                                                      curdist_list->cred_min);
                }
        }
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked; it
 * must NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() api.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
                                     struct list_head *ep_dist_list,
                                     enum htc_credit_dist_reason reason)
{
        switch (reason) {
        case HTC_CREDIT_DIST_SEND_COMPLETE:
                ath6kl_credit_update(cred_info, ep_dist_list);
                break;
        case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
                ath6kl_redistribute_credits(cred_info, ep_dist_list);
                break;
        default:
                break;
        }

        WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
        WARN_ON(cred_info->cur_free_credits < 0);
}

static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
        u8 *align_addr;

        if (!IS_ALIGNED((unsigned long) *buf, 4)) {
                align_addr = PTR_ALIGN(*buf - 4, 4);
                memmove(align_addr, *buf, len);
                *buf = align_addr;
        }
}
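
/*
 * Illustration (hypothetical addresses): if *buf is 0x1003, then
 * PTR_ALIGN(0x1003 - 4, 4) = 0x1000, so the payload is moved down by
 * three bytes onto a 4-byte boundary. The caller must guarantee that
 * up to three bytes of headroom exist below *buf.
 */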

static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
                                   int ctrl0, int ctrl1)
{
        struct htc_frame_hdr *hdr;

        packet->buf -= HTC_HDR_LENGTH;
        hdr = (struct htc_frame_hdr *)packet->buf;

        /* Endianness? */
        put_unaligned((u16)packet->act_len, &hdr->payld_len);
        hdr->flags = flags;
        hdr->eid = packet->endpoint;
        hdr->ctrl[0] = ctrl0;
        hdr->ctrl[1] = ctrl1;
}
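
/*
 * Every tx buffer reserves HTC_HDR_LENGTH bytes of headroom so the
 * header can be written in place: the frame header carries the payload
 * length, the send flags, the endpoint id and two per-message control
 * bytes (credit padding and sequence number on the bundled tx path
 * below).
 */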

static void htc_reclaim_txctrl_buf(struct htc_target *target,
                                   struct htc_packet *pkt)
{
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
        spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
                                              bool tx)
{
        struct htc_packet *packet = NULL;
        struct list_head *buf_list;

        buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

        spin_lock_bh(&target->htc_lock);

        if (list_empty(buf_list)) {
                spin_unlock_bh(&target->htc_lock);
                return NULL;
        }

        packet = list_first_entry(buf_list, struct htc_packet, list);
        list_del(&packet->list);
        spin_unlock_bh(&target->htc_lock);

        if (tx)
                packet->buf = packet->buf_start + HTC_HDR_LENGTH;

        return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
                               struct htc_endpoint *endpoint,
                               struct htc_packet *packet)
{
        packet->completion = NULL;
        packet->buf += HTC_HDR_LENGTH;

        if (!packet->status)
                return;

        ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
                   packet->status, packet->endpoint, packet->act_len,
                   packet->info.tx.cred_used);

        /* on failure to submit, reclaim credits for this packet */
        spin_lock_bh(&target->tx_lock);
        endpoint->cred_dist.cred_to_dist +=
                packet->info.tx.cred_used;
        endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

        ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
                   target->credit_info, &target->cred_dist_list);

        ath6kl_credit_distribute(target->credit_info,
                                 &target->cred_dist_list,
                                 HTC_CREDIT_DIST_SEND_COMPLETE);

        spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
                            struct list_head *txq)
{
        if (list_empty(txq))
                return;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc tx complete ep %d pkts %d\n",
                   endpoint->eid, get_queue_depth(txq));

        ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
                                struct htc_packet *packet)
{
        struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
        struct list_head container;

        htc_tx_comp_update(target, endpoint, packet);
        INIT_LIST_HEAD(&container);
        list_add_tail(&packet->list, &container);
        /* do completion */
        htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct htc_target *target,
                                       struct hif_scatter_req *scat_req)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet;
        struct list_head tx_compq;
        int i;

        INIT_LIST_HEAD(&tx_compq);

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc tx scat complete len %d entries %d\n",
                   scat_req->len, scat_req->scat_entries);

        if (scat_req->status)
                ath6kl_err("send scatter req failed: %d\n", scat_req->status);

        packet = scat_req->scat_list[0].packet;
        endpoint = &target->endpoint[packet->endpoint];

        /* walk through the scatter list and process */
        for (i = 0; i < scat_req->scat_entries; i++) {
                packet = scat_req->scat_list[i].packet;
                if (!packet) {
                        WARN_ON(1);
                        return;
                }

                packet->status = scat_req->status;
                htc_tx_comp_update(target, endpoint, packet);
                list_add_tail(&packet->list, &tx_compq);
        }

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

        /* complete all packets */
        htc_tx_complete(endpoint, &tx_compq);
}

static int ath6kl_htc_tx_issue(struct htc_target *target,
                               struct htc_packet *packet)
{
        int status;
        bool sync = false;
        u32 padded_len, send_len;

        if (!packet->completion)
                sync = true;

        send_len = packet->act_len + HTC_HDR_LENGTH;

        padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc tx issue len %d padded_len %d mbox 0x%X %s\n",
                   send_len, padded_len,
                   target->dev->ar->mbox_info.htc_addr,
                   sync ? "sync" : "async");

        if (sync) {
                status = hif_read_write_sync(target->dev->ar,
                                             target->dev->ar->mbox_info.htc_addr,
                                             packet->buf, padded_len,
                                             HIF_WR_SYNC_BLOCK_INC);

                packet->status = status;
                packet->buf += HTC_HDR_LENGTH;
        } else
                status = hif_write_async(target->dev->ar,
                                         target->dev->ar->mbox_info.htc_addr,
                                         packet->buf, padded_len,
                                         HIF_WR_ASYNC_BLOCK_INC, packet);

        return status;
}

static int htc_check_credits(struct htc_target *target,
                             struct htc_endpoint *ep, u8 *flags,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
{
        *req_cred = (len > target->tgt_cred_sz) ?
                     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

        ath6kl_dbg(ATH6KL_DBG_HTC, "htc creds required %d got %d\n",
                   *req_cred, ep->cred_dist.credits);

        if (ep->cred_dist.credits < *req_cred) {
                if (eid == ENDPOINT_0)
                        return -EINVAL;

                /* Seek more credits */
                ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC, "htc creds ctxt 0x%p dist 0x%p\n",
                           target->credit_info, &ep->cred_dist);

                ath6kl_seek_credits(target->credit_info, &ep->cred_dist);

                ep->cred_dist.seek_cred = 0;

                if (ep->cred_dist.credits < *req_cred) {
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc creds not enough credits for ep %d\n",
                                   eid);
                        return -EINVAL;
                }
        }

        ep->cred_dist.credits -= *req_cred;
        ep->ep_st.cred_cosumd += *req_cred;

        /* When we are getting low on credits, ask for more */
        if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                ep->cred_dist.seek_cred =
                        ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC, "htc creds ctxt 0x%p dist 0x%p\n",
                           target->credit_info, &ep->cred_dist);

                ath6kl_seek_credits(target->credit_info, &ep->cred_dist);

                /* see if we were successful in getting more */
                if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                        /* tell the target we need credits ASAP! */
                        *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
                        ep->ep_st.cred_low_indicate += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC, "htc creds host needs credits\n");
                }
        }

        return 0;
}
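
/*
 * Credit cost example (hypothetical numbers): if the target credit
 * size (tgt_cred_sz) is 1536 bytes, a 90-byte message costs one
 * credit while a 1600-byte message costs DIV_ROUND_UP(1600, 1536) = 2.
 */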
568
Kalle Valodfa01042011-09-06 11:10:49 +0300569static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
570 struct htc_endpoint *endpoint,
571 struct list_head *queue)
Kalle Valobdcd8172011-07-18 00:22:30 +0300572{
573 int req_cred;
574 u8 flags;
575 struct htc_packet *packet;
576 unsigned int len;
577
578 while (true) {
579
580 flags = 0;
581
582 if (list_empty(&endpoint->txq))
583 break;
584 packet = list_first_entry(&endpoint->txq, struct htc_packet,
585 list);
586
Kalle Valoebf29c92011-10-13 15:21:15 +0300587 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +0300588 "htc tx got packet 0x%p queue depth %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +0300589 packet, get_queue_depth(&endpoint->txq));
590
Vasanthakumar Thiagarajan5be88242011-07-18 14:23:28 +0530591 len = CALC_TXRX_PADDED_LEN(target,
Kalle Valobdcd8172011-07-18 00:22:30 +0300592 packet->act_len + HTC_HDR_LENGTH);
593
594 if (htc_check_credits(target, endpoint, &flags,
595 packet->endpoint, len, &req_cred))
596 break;
597
598 /* now we can fully move onto caller's queue */
599 packet = list_first_entry(&endpoint->txq, struct htc_packet,
600 list);
601 list_move_tail(&packet->list, queue);
602
603 /* save the number of credits this packet consumed */
604 packet->info.tx.cred_used = req_cred;
605
606 /* all TX packets are handled asynchronously */
607 packet->completion = htc_tx_comp_handler;
608 packet->context = target;
609 endpoint->ep_st.tx_issued += 1;
610
611 /* save send flags */
612 packet->info.tx.flags = flags;
613 packet->info.tx.seqno = endpoint->seqno;
614 endpoint->seqno++;
615 }
616}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
                                  struct htc_endpoint *ep)
{
        int rem_cred, cred_pad;

        rem_cred = *len % cred_sz;

        /* No padding needed */
        if (!rem_cred)
                return 0;

        if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
                return -1;

        /*
         * The transfer consumes a "partial" credit, this
         * packet cannot be bundled unless we add
         * additional "dummy" padding (max 255 bytes) to
         * consume the entire credit.
         */
        cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

        if ((cred_pad > 0) && (cred_pad <= 255))
                *len += cred_pad;
        else
                /* The amount of padding is too large, send as non-bundled */
                return -1;

        return cred_pad;
}
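
/*
 * Padding example (hypothetical numbers): with cred_sz = 128, a
 * 90-byte transfer gets cred_pad = 128 - 90 = 38 and goes out as 128
 * bytes, consuming exactly one credit; padding that would exceed 255
 * bytes forces the packet out of the bundle instead.
 */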
649
Kalle Valodfa01042011-09-06 11:10:49 +0300650static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
651 struct htc_endpoint *endpoint,
652 struct hif_scatter_req *scat_req,
653 int n_scat,
654 struct list_head *queue)
Kalle Valobdcd8172011-07-18 00:22:30 +0300655{
656 struct htc_packet *packet;
657 int i, len, rem_scat, cred_pad;
658 int status = 0;
659
Vasanthakumar Thiagarajan3b2f5e52011-07-18 14:23:27 +0530660 rem_scat = target->max_tx_bndl_sz;
Kalle Valobdcd8172011-07-18 00:22:30 +0300661
662 for (i = 0; i < n_scat; i++) {
663 scat_req->scat_list[i].packet = NULL;
664
665 if (list_empty(queue))
666 break;
667
668 packet = list_first_entry(queue, struct htc_packet, list);
Vasanthakumar Thiagarajan5be88242011-07-18 14:23:28 +0530669 len = CALC_TXRX_PADDED_LEN(target,
Kalle Valobdcd8172011-07-18 00:22:30 +0300670 packet->act_len + HTC_HDR_LENGTH);
671
672 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
673 &len, endpoint);
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530674 if (cred_pad < 0 || rem_scat < len) {
Kalle Valobdcd8172011-07-18 00:22:30 +0300675 status = -ENOSPC;
676 break;
677 }
678
679 rem_scat -= len;
680 /* now remove it from the queue */
Kalle Valobdcd8172011-07-18 00:22:30 +0300681 list_del(&packet->list);
682
683 scat_req->scat_list[i].packet = packet;
684 /* prepare packet and flag message as part of a send bundle */
Kalle Valodfa01042011-09-06 11:10:49 +0300685 ath6kl_htc_tx_prep_pkt(packet,
Kalle Valobdcd8172011-07-18 00:22:30 +0300686 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
687 cred_pad, packet->info.tx.seqno);
Vasanthakumar Thiagarajan94e532d2011-08-22 20:14:31 +0530688 /* Make sure the buffer is 4-byte aligned */
Kalle Valodfa01042011-09-06 11:10:49 +0300689 ath6kl_htc_tx_buf_align(&packet->buf,
690 packet->act_len + HTC_HDR_LENGTH);
Kalle Valobdcd8172011-07-18 00:22:30 +0300691 scat_req->scat_list[i].buf = packet->buf;
692 scat_req->scat_list[i].len = len;
693
694 scat_req->len += len;
695 scat_req->scat_entries++;
Kalle Valoebf29c92011-10-13 15:21:15 +0300696 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +0300697 "htc tx adding (%d) pkt 0x%p len %d remaining %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +0300698 i, packet, len, rem_scat);
699 }
700
701 /* Roll back scatter setup in case of any failure */
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530702 if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
Kalle Valobdcd8172011-07-18 00:22:30 +0300703 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
704 packet = scat_req->scat_list[i].packet;
705 if (packet) {
706 packet->buf += HTC_HDR_LENGTH;
707 list_add(&packet->list, queue);
708 }
709 }
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530710 return -EAGAIN;
Kalle Valobdcd8172011-07-18 00:22:30 +0300711 }
712
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530713 return status;
Kalle Valobdcd8172011-07-18 00:22:30 +0300714}

/*
 * Drain a queue and send as bundles. This function may return without
 * fully draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
                                 struct list_head *queue,
                                 int *sent_bundle, int *n_bundle_pkts)
{
        struct htc_target *target = endpoint->target;
        struct hif_scatter_req *scat_req = NULL;
        int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
        int status;

        while (true) {
                status = 0;
                n_scat = get_queue_depth(queue);
                n_scat = min(n_scat, target->msg_per_bndl_max);

                if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
                        /* not enough to bundle */
                        break;

                scat_req = hif_scatter_req_get(target->dev->ar);

                if (!scat_req) {
                        /* no scatter resources */
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc tx no more scatter resources\n");
                        break;
                }

                ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
                           n_scat);

                scat_req->len = 0;
                scat_req->scat_entries = 0;

                status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
                                                       scat_req, n_scat,
                                                       queue);
                if (status == -EAGAIN) {
                        hif_scatter_req_add(target->dev->ar, scat_req);
                        break;
                }

                /* send path is always asynchronous */
                scat_req->complete = htc_async_tx_scat_complete;
                n_sent_bundle++;
                tot_pkts_bundle += scat_req->scat_entries;

                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc tx scatter bytes %d entries %d\n",
                           scat_req->len, scat_req->scat_entries);
                ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

                if (status)
                        break;
        }

        *sent_bundle = n_sent_bundle;
        *n_bundle_pkts = tot_pkts_bundle;
        ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
                   n_sent_bundle);

        return;
}

static void ath6kl_htc_tx_from_queue(struct htc_target *target,
                                     struct htc_endpoint *endpoint)
{
        struct list_head txq;
        struct htc_packet *packet;
        int bundle_sent;
        int n_pkts_bundle;

        spin_lock_bh(&target->tx_lock);

        endpoint->tx_proc_cnt++;
        if (endpoint->tx_proc_cnt > 1) {
                endpoint->tx_proc_cnt--;
                spin_unlock_bh(&target->tx_lock);
                ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
                return;
        }

        /*
         * drain the endpoint TX queue for transmission as long
         * as we have enough credits.
         */
        INIT_LIST_HEAD(&txq);

        while (true) {

                if (list_empty(&endpoint->txq))
                        break;

                ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

                if (list_empty(&txq))
                        break;

                spin_unlock_bh(&target->tx_lock);

                bundle_sent = 0;
                n_pkts_bundle = 0;

                while (true) {
                        /* try to send a bundle on each pass */
                        if ((target->tx_bndl_enable) &&
                            (get_queue_depth(&txq) >=
                             HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                                int temp1 = 0, temp2 = 0;

                                ath6kl_htc_tx_bundle(endpoint, &txq,
                                                     &temp1, &temp2);
                                bundle_sent += temp1;
                                n_pkts_bundle += temp2;
                        }

                        if (list_empty(&txq))
                                break;

                        packet = list_first_entry(&txq, struct htc_packet,
                                                  list);
                        list_del(&packet->list);

                        ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
                                               0, packet->info.tx.seqno);
                        ath6kl_htc_tx_issue(target, packet);
                }

                spin_lock_bh(&target->tx_lock);

                endpoint->ep_st.tx_bundles += bundle_sent;
                endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
        }

        endpoint->tx_proc_cnt = 0;
        spin_unlock_bh(&target->tx_lock);
}

static bool ath6kl_htc_tx_try(struct htc_target *target,
                              struct htc_endpoint *endpoint,
                              struct htc_packet *tx_pkt)
{
        struct htc_ep_callbacks ep_cb;
        int txq_depth;
        bool overflow = false;

        ep_cb = endpoint->ep_cb;

        spin_lock_bh(&target->tx_lock);
        txq_depth = get_queue_depth(&endpoint->txq);
        spin_unlock_bh(&target->tx_lock);

        if (txq_depth >= endpoint->max_txq_depth)
                overflow = true;

        if (overflow)
                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc tx overflow ep %d depth %d max %d\n",
                           endpoint->eid, txq_depth,
                           endpoint->max_txq_depth);

        if (overflow && ep_cb.tx_full) {
                if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
                    HTC_SEND_FULL_DROP) {
                        endpoint->ep_st.tx_dropped += 1;
                        return false;
                }
        }

        spin_lock_bh(&target->tx_lock);
        list_add_tail(&tx_pkt->list, &endpoint->txq);
        spin_unlock_bh(&target->tx_lock);

        ath6kl_htc_tx_from_queue(target, endpoint);

        return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_endpoint_credit_dist *cred_dist;

        /*
         * Run through the credit distribution list to see if there are
         * packets queued. NOTE: no locks need to be taken since the
         * distribution list is not dynamic (cannot be re-ordered) and we
         * are not modifying any state.
         */
        list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
                endpoint = cred_dist->htc_ep;

                spin_lock_bh(&target->tx_lock);
                if (!list_empty(&endpoint->txq)) {
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc creds ep %d credits %d pkts %d\n",
                                   cred_dist->endpoint,
                                   endpoint->cred_dist.credits,
                                   get_queue_depth(&endpoint->txq));
                        spin_unlock_bh(&target->tx_lock);
                        /*
                         * Try to start the stalled queue, this list is
                         * ordered by priority. If there are credits
                         * available the highest priority queue will get a
                         * chance to reclaim credits from lower priority
                         * ones.
                         */
                        ath6kl_htc_tx_from_queue(target, endpoint);
                        spin_lock_bh(&target->tx_lock);
                }
                spin_unlock_bh(&target->tx_lock);
        }
}

static int htc_setup_tx_complete(struct htc_target *target)
{
        struct htc_packet *send_pkt = NULL;
        int status;

        send_pkt = htc_get_control_buf(target, true);

        if (!send_pkt)
                return -ENOMEM;

        if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
                struct htc_setup_comp_ext_msg *setup_comp_ext;
                u32 flags = 0;

                setup_comp_ext =
                        (struct htc_setup_comp_ext_msg *)send_pkt->buf;
                memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
                setup_comp_ext->msg_id =
                        cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

                if (target->msg_per_bndl_max > 0) {
                        /* Indicate HTC bundling to the target */
                        flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
                        setup_comp_ext->msg_per_rxbndl =
                                target->msg_per_bndl_max;
                }

                memcpy(&setup_comp_ext->flags, &flags,
                       sizeof(setup_comp_ext->flags));
                set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
                                 sizeof(struct htc_setup_comp_ext_msg),
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

        } else {
                struct htc_setup_comp_msg *setup_comp;
                setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
                memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
                setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
                set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
                                 sizeof(struct htc_setup_comp_msg),
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
        }

        /* we want synchronous operation */
        send_pkt->completion = NULL;
        ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
        status = ath6kl_htc_tx_issue(target, send_pkt);

        if (send_pkt != NULL)
                htc_reclaim_txctrl_buf(target, send_pkt);

        return status;
}

void ath6kl_htc_set_credit_dist(struct htc_target *target,
                                struct ath6kl_htc_credit_info *credit_info,
                                u16 srvc_pri_order[], int list_len)
{
        struct htc_endpoint *endpoint;
        int i, ep;

        target->credit_info = credit_info;

        list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
                      &target->cred_dist_list);

        for (i = 0; i < list_len; i++) {
                for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
                        endpoint = &target->endpoint[ep];
                        if (endpoint->svc_id == srvc_pri_order[i]) {
                                list_add_tail(&endpoint->cred_dist.list,
                                              &target->cred_dist_list);
                                break;
                        }
                }
                if (ep >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        return;
                }
        }
}

int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
{
        struct htc_endpoint *endpoint;
        struct list_head queue;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc tx ep id %d buf 0x%p len %d\n",
                   packet->endpoint, packet->buf, packet->act_len);

        if (packet->endpoint >= ENDPOINT_MAX) {
                WARN_ON(1);
                return -EINVAL;
        }

        endpoint = &target->endpoint[packet->endpoint];

        if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
                packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
                                 -ECANCELED : -ENOSPC;
                INIT_LIST_HEAD(&queue);
                list_add(&packet->list, &queue);
                htc_tx_complete(endpoint, &queue);
        }

        return 0;
}
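
/*
 * Typical usage (sketch): a caller prepares an htc_packet with
 * set_htc_pkt_info(pkt, context, buf, len, eid, tag) and hands it to
 * ath6kl_htc_tx(). If the packet cannot be queued, it is completed
 * with -ENOSPC (overflow) or -ECANCELED (stopping) through the
 * endpoint's tx completion path.
 */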

/* flush endpoint TX queue */
void ath6kl_htc_flush_txep(struct htc_target *target,
                           enum htc_endpoint_id eid, u16 tag)
{
        struct htc_packet *packet, *tmp_pkt;
        struct list_head discard_q, container;
        struct htc_endpoint *endpoint = &target->endpoint[eid];

        if (!endpoint->svc_id) {
                WARN_ON(1);
                return;
        }

        /* initialize the discard queue */
        INIT_LIST_HEAD(&discard_q);

        spin_lock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
                if ((tag == HTC_TX_PACKET_TAG_ALL) ||
                    (tag == packet->info.tx.tag))
                        list_move_tail(&packet->list, &discard_q);
        }

        spin_unlock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
                packet->status = -ECANCELED;
                list_del(&packet->list);
                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
                           packet, packet->act_len,
                           packet->endpoint, packet->info.tx.tag);

                INIT_LIST_HEAD(&container);
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }
}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        int i;

        dump_cred_dist_stats(target);

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (endpoint->svc_id == 0)
                        /* not in use.. */
                        continue;
                ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
        }
}

void ath6kl_htc_indicate_activity_change(struct htc_target *target,
                                         enum htc_endpoint_id eid, bool active)
{
        struct htc_endpoint *endpoint = &target->endpoint[eid];
        bool dist = false;

        if (endpoint->svc_id == 0) {
                WARN_ON(1);
                return;
        }

        spin_lock_bh(&target->tx_lock);

        if (active) {
                if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
                        endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
                        dist = true;
                }
        } else {
                if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
                        endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
                        dist = true;
                }
        }

        if (dist) {
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc tx activity ctxt 0x%p dist 0x%p\n",
                           target->credit_info, &target->cred_dist_list);

                ath6kl_credit_distribute(target->credit_info,
                                         &target->cred_dist_list,
                                         HTC_CREDIT_DIST_ACTIVITY_CHANGE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (dist && !active)
                htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
                                              int n_look_ahds)
{
        endpoint->ep_st.rx_pkts++;
        if (n_look_ahds == 1)
                endpoint->ep_st.rx_lkahds++;
        else if (n_look_ahds > 1)
                endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
                                          enum htc_endpoint_id eid, int len)
{
        return (eid == target->dev->ar->ctrl_ep) ?
                len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
        struct list_head queue;

        INIT_LIST_HEAD(&queue);
        list_add_tail(&packet->list, &queue);
        return ath6kl_htc_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
                              struct htc_packet *packet,
                              struct htc_endpoint *ep)
{
        if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
                htc_rxpkt_reset(packet);
                packet->status = -ECANCELED;
                ep->ep_cb.rx(ep->target, packet);
        } else {
                htc_rxpkt_reset(packet);
                htc_add_rxbuf((void *)(target), packet);
        }
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
                                struct htc_packet *packet)
{
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
        spin_unlock_bh(&target->htc_lock);
}

static int ath6kl_htc_rx_packet(struct htc_target *target,
                                struct htc_packet *packet,
                                u32 rx_len)
{
        struct ath6kl_device *dev = target->dev;
        u32 padded_len;
        int status;

        padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

        if (padded_len > packet->buf_len) {
                ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
                           padded_len, rx_len, packet->buf_len);
                return -ENOMEM;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
                   packet, packet->info.rx.exp_hdr,
                   padded_len, dev->ar->mbox_info.htc_addr);

        status = hif_read_write_sync(dev->ar,
                                     dev->ar->mbox_info.htc_addr,
                                     packet->buf, padded_len,
                                     HIF_RD_SYNC_BLOCK_FIX);

        packet->status = status;

        return status;
}
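
/*
 * Reads are rounded up to the block size (hypothetical example: a
 * 90-byte frame on a 128-byte block interface is fetched as one
 * 128-byte read), which is why the receive buffer must be at least
 * padded_len bytes long.
 */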

/*
 * Optimization for receive packets: we can indicate a
 * "hint" that there are more single packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
                                       struct htc_endpoint *endpoint,
                                       struct htc_packet *packet)
{
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

        if (htc_hdr->eid == packet->endpoint) {
                if (!list_empty(&endpoint->rx_bufq))
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;
        }
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
        struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

        if (ep_cb.rx_refill_thresh > 0) {
                spin_lock_bh(&endpoint->target->rx_lock);
                if (get_queue_depth(&endpoint->rx_bufq)
                    < ep_cb.rx_refill_thresh) {
                        spin_unlock_bh(&endpoint->target->rx_lock);
                        ep_cb.rx_refill(endpoint->target, endpoint->eid);
                        return;
                }
                spin_unlock_bh(&endpoint->target->rx_lock);
        }
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
                               struct htc_endpoint *ep,
                               u32 *lk_ahds, struct list_head *queue, int n_msg)
{
        struct htc_packet *packet;
        /* FIXME: type of lk_ahds can't be right */
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
        struct htc_ep_callbacks ep_cb;
        int status = 0, j, full_len;
        bool no_recycle;

        full_len = CALC_TXRX_PADDED_LEN(target,
                                        le16_to_cpu(htc_hdr->payld_len) +
                                        sizeof(*htc_hdr));

        if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
                ath6kl_warn("Rx buffer requested with invalid length\n");
                return -EINVAL;
        }

        ep_cb = ep->ep_cb;
        for (j = 0; j < n_msg; j++) {

                /*
                 * Reset flag, any packets allocated using the
                 * rx_alloc() API cannot be recycled on
                 * cleanup, they must be explicitly returned.
                 */
                no_recycle = false;

                if (ep_cb.rx_allocthresh &&
                    (full_len > ep_cb.rx_alloc_thresh)) {
                        ep->ep_st.rx_alloc_thresh_hit += 1;
                        ep->ep_st.rxalloc_thresh_byte +=
                                le16_to_cpu(htc_hdr->payld_len);

                        spin_unlock_bh(&target->rx_lock);
                        no_recycle = true;

                        packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
                                                      full_len);
                        spin_lock_bh(&target->rx_lock);
                } else {
                        /* refill handler is being used */
                        if (list_empty(&ep->rx_bufq)) {
                                if (ep_cb.rx_refill) {
                                        spin_unlock_bh(&target->rx_lock);
                                        ep_cb.rx_refill(ep->target, ep->eid);
                                        spin_lock_bh(&target->rx_lock);
                                }
                        }

                        if (list_empty(&ep->rx_bufq))
                                packet = NULL;
                        else {
                                packet = list_first_entry(&ep->rx_bufq,
                                                struct htc_packet, list);
                                list_del(&packet->list);
                        }
                }

                if (!packet) {
                        target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ep->eid;
                        return -ENOSPC;
                }

                /* clear flags */
                packet->info.rx.rx_flags = 0;
                packet->info.rx.indicat_flags = 0;
                packet->status = 0;

                if (no_recycle)
                        /*
                         * flag that these packets cannot be
                         * recycled, they have to be returned to
                         * the user
                         */
                        packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

                /* Caller needs to free this upon any failure */
                list_add_tail(&packet->list, queue);

                if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                        status = -ECANCELED;
                        break;
                }

                if (j) {
                        packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
                        packet->info.rx.exp_hdr = 0xFFFFFFFF;
                } else
                        /* set expected look ahead */
                        packet->info.rx.exp_hdr = *lk_ahds;

                packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
                                  HTC_HDR_LENGTH;
        }

        return status;
}

static int ath6kl_htc_rx_alloc(struct htc_target *target,
                               u32 lk_ahds[], int msg,
                               struct htc_endpoint *endpoint,
                               struct list_head *queue)
{
        int status = 0;
        struct htc_packet *packet, *tmp_pkt;
        struct htc_frame_hdr *htc_hdr;
        int i, n_msg;

        spin_lock_bh(&target->rx_lock);

        for (i = 0; i < msg; i++) {

                htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

                if (htc_hdr->eid >= ENDPOINT_MAX) {
                        ath6kl_err("invalid ep in look-ahead: %d\n",
                                   htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->eid != endpoint->eid) {
                        ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
                                   htc_hdr->eid, endpoint->eid, i);
                        status = -ENOMEM;
                        break;
                }

                if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
                        ath6kl_err("payload len %d exceeds max htc : %d !\n",
                                   htc_hdr->payld_len,
                                   (u32) HTC_MAX_PAYLOAD_LENGTH);
                        status = -ENOMEM;
                        break;
                }

                if (endpoint->svc_id == 0) {
                        ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
                        /*
                         * HTC header indicates that every packet to follow
                         * has the same padded length so that it can be
                         * optimally fetched as a full bundle.
                         */
                        n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
                                HTC_FLG_RX_BNDL_CNT_S;

                        /* the count doesn't include the starter frame */
                        n_msg++;
                        if (n_msg > target->msg_per_bndl_max) {
                                status = -ENOMEM;
                                break;
                        }

                        endpoint->ep_st.rx_bundle_from_hdr += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx bundle pkts %d\n",
                                   n_msg);
                } else
                        /* HTC header only indicates 1 message to fetch */
                        n_msg = 1;

                /* Setup packet buffers for each message */
                status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
                                             queue, n_msg);

                /*
                 * This happens when there are not enough buffers to
                 * receive all of the data. Return no error so that the
                 * free buffers already queued can be used to receive
                 * partial data.
                 */
                if (status == -ENOSPC) {
                        spin_unlock_bh(&target->rx_lock);
                        return 0;
                }

                if (status)
                        break;
        }

        spin_unlock_bh(&target->rx_lock);

        if (status) {
                list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
                        list_del(&packet->list);
                        htc_reclaim_rxbuf(target, packet,
                                          &target->endpoint[packet->endpoint]);
                }
        }

        return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
        if (packets->endpoint != ENDPOINT_0) {
                WARN_ON(1);
                return;
        }

        if (packets->status == -ECANCELED) {
                reclaim_rx_ctrl_buf(context, packets);
                return;
        }

        if (packets->act_len > 0) {
                ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
                           packets->act_len + HTC_HDR_LENGTH);

                ath6kl_dbg_dump(ATH6KL_DBG_HTC,
                                "htc rx unexpected endpoint 0 message", "",
                                packets->buf - HTC_HDR_LENGTH,
                                packets->act_len + HTC_HDR_LENGTH);
        }

        htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

static void htc_proc_cred_rpt(struct htc_target *target,
                              struct htc_credit_report *rpt,
                              int n_entries,
                              enum htc_endpoint_id from_ep)
{
        struct htc_endpoint *endpoint;
        int tot_credits = 0, i;
        bool dist = false;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc creds report entries %d\n", n_entries);

        spin_lock_bh(&target->tx_lock);

        for (i = 0; i < n_entries; i++, rpt++) {
                if (rpt->eid >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        spin_unlock_bh(&target->tx_lock);
                        return;
                }

                endpoint = &target->endpoint[rpt->eid];

                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc creds report ep %d credits %d\n",
                           rpt->eid, rpt->credits);

                endpoint->ep_st.tx_cred_rpt += 1;
                endpoint->ep_st.cred_retnd += rpt->credits;

                if (from_ep == rpt->eid) {
                        /*
                         * This credit report arrived on the same endpoint
                         * indicating it arrived in an RX packet.
                         */
                        endpoint->ep_st.cred_from_rx += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_rx += 1;
                } else if (from_ep == ENDPOINT_0) {
                        /* credit arrived on endpoint 0 as a NULL message */
                        endpoint->ep_st.cred_from_ep0 += rpt->credits;
                        endpoint->ep_st.cred_rpt_ep0 += 1;
                } else {
                        endpoint->ep_st.cred_from_other += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_other += 1;
                }

                if (rpt->eid == ENDPOINT_0)
                        /* always give endpoint 0 credits back */
                        endpoint->cred_dist.credits += rpt->credits;
                else {
                        endpoint->cred_dist.cred_to_dist += rpt->credits;
                        dist = true;
                }

                /*
                 * Refresh tx depth for distribution function that will
                 * recover these credits. NOTE: this is only valid when
                 * there are credits to recover!
                 */
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                tot_credits += rpt->credits;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc creds report tot_credits %d\n",
                   tot_credits);

        if (dist) {
                /*
                 * This was a credit return based on a completed send
                 * operation. Note: this is done with the lock held.
                 */
                ath6kl_dbg(ATH6KL_DBG_HTC, "htc creds ctxt 0x%p dist 0x%p\n",
                           target->credit_info, &target->cred_dist_list);

                ath6kl_credit_distribute(target->credit_info,
                                         &target->cred_dist_list,
                                         HTC_CREDIT_DIST_SEND_COMPLETE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (tot_credits)
                htc_chk_ep_txq(target);
}

static int htc_parse_trailer(struct htc_target *target,
                             struct htc_record_hdr *record,
                             u8 *record_buf, u32 *next_lk_ahds,
                             enum htc_endpoint_id endpoint,
                             int *n_lk_ahds)
{
        struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
        struct htc_lookahead_report *lk_ahd;
        int len;

        switch (record->rec_id) {
        case HTC_RECORD_CREDITS:
                len = record->len / sizeof(struct htc_credit_report);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                htc_proc_cred_rpt(target,
                                  (struct htc_credit_report *) record_buf,
                                  len, endpoint);
                break;
        case HTC_RECORD_LOOKAHEAD:
                len = record->len / sizeof(*lk_ahd);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                lk_ahd = (struct htc_lookahead_report *) record_buf;
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
                    && next_lk_ahds) {

                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);

                        /* look ahead bytes are valid, copy them over */
                        memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

                        ath6kl_dbg_dump(ATH6KL_DBG_HTC,
                                        "htc rx next look ahead",
                                        "", next_lk_ahds, 4);

                        *n_lk_ahds = 1;
                }
                break;
        case HTC_RECORD_LOOKAHEAD_BUNDLE:
                len = record->len / sizeof(*bundle_lkahd_rpt);
                if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                if (next_lk_ahds) {
                        int i;

                        bundle_lkahd_rpt =
                                (struct htc_bundle_lkahd_rpt *) record_buf;

                        ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
                                        "", record_buf, record->len);

                        for (i = 0; i < len; i++) {
                                memcpy((u8 *)&next_lk_ahds[i],
                                       bundle_lkahd_rpt->lk_ahd, 4);
                                bundle_lkahd_rpt++;
                        }

                        *n_lk_ahds = i;
                }
                break;
        default:
                ath6kl_err("unhandled record: id:%d len:%d\n",
                           record->rec_id, record->len);
                break;
        }

        return 0;
}
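
/*
 * Trailer layout, as consumed below: a sequence of byte-aligned
 * records, each an htc_record_hdr { rec_id, len } immediately followed
 * by len bytes of payload (credit reports, a lookahead report, or a
 * bundled lookahead report).
 */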

static int htc_proc_trailer(struct htc_target *target,
                            u8 *buf, int len, u32 *next_lk_ahds,
                            int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
        struct htc_record_hdr *record;
        int orig_len;
        int status;
        u8 *record_buf;
        u8 *orig_buf;

        ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
        ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

        orig_buf = buf;
        orig_len = len;
        status = 0;

        while (len > 0) {

                if (len < sizeof(struct htc_record_hdr)) {
                        status = -ENOMEM;
                        break;
                }
                /* these are byte aligned structs */
                record = (struct htc_record_hdr *) buf;
                len -= sizeof(struct htc_record_hdr);
                buf += sizeof(struct htc_record_hdr);

                if (record->len > len) {
                        ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
                                   record->len, record->rec_id, len);
                        status = -ENOMEM;
                        break;
                }
                record_buf = buf;

                status = htc_parse_trailer(target, record, record_buf,
                                           next_lk_ahds, endpoint, n_lk_ahds);

                if (status)
                        break;

                /* advance buffer past this record for next time around */
                buf += record->len;
                len -= record->len;
        }

        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
                                "", orig_buf, orig_len);

        return status;
}
1713
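/*
 * Worked example of a trailer walk (hypothetical bytes; record IDs shown
 * symbolically, and a credit report assumed to be 2 bytes as in htc.h):
 *
 *	[HTC_RECORD_CREDITS,   len 2] eid, credits
 *	[HTC_RECORD_LOOKAHEAD, len 6] pre_valid, lk_ahd[4], post_valid
 *
 * htc_proc_trailer() consumes one 2 byte record header plus record->len
 * payload bytes per iteration, so this 12 byte trailer is parsed in two
 * passes before len reaches 0. A record->len larger than the bytes still
 * remaining aborts the walk with an error.
 */
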
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to the buffer size just to print out
			 * some of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}

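/*
 * For orientation, the 6 byte HTC frame header parsed above (per struct
 * htc_frame_hdr in htc.h) is laid out as:
 *
 *	byte 0:    eid
 *	byte 1:    flags
 *	bytes 2-3: payld_len (little endian, excludes the header)
 *	bytes 4-5: ctrl[0], ctrl[1]
 *
 * When HTC_FLG_RX_TRAILER is set, ctrl[0] gives the trailer length and
 * the trailer sits in the last ctrl[0] bytes of the payload. Example
 * with assumed numbers: payload_len 100 and ctrl[0] 12 put the trailer
 * at buf + HTC_HDR_LENGTH + 100 - 12, and act_len is trimmed by 12 so
 * the endpoint callback only sees the 88 real payload bytes.
 */
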
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d, %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

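/*
 * CALC_TXRX_PADDED_LEN() rounds a packet length up to the device block
 * size so each scatter entry stays block aligned. A worked example with
 * an assumed block size of 128 bytes (block_mask 0x7f):
 *
 *	act_len = 1500
 *	pad_len = (1500 + 127) & ~127 = 1536
 *
 * so a 1500 byte packet consumes 1536 bytes of the bundle budget
 * (rem_space), and the loop above stops adding packets once the next
 * padded length no longer fits in max_rx_bndl_sz.
 */
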
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packets */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      &tmp_rxq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				goto fail_rx;

			if (!list_empty(rx_pktq))
				part_bundle = true;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}

		if (!fetched_pkts) {

			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_is_singular(rx_pktq))
				/*
				 * look_aheads in all packets
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);

			list_move_tail(&packet->list, &tmp_rxq);

			if (status)
				goto fail_rx;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}
	}

	return 0;

fail_rx:

	/*
	 * Clean up any packets we allocated but didn't use to
	 * actually fetch any packets.
	 */

	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	return status;
}

int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead, int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing.
	 */
	look_aheads[0] = msg_look_ahead;

	while (true) {

		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle. Validate the ID before using it to
		 * index the endpoint array.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		endpoint = &target->endpoint[id];

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets as indicated by
		 * the look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force IRQ status
			 * re-check.
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kl_hif_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kl_hif_rx_control(target->dev, false);
	}
	*num_pkts = n_fetched;

	return status;
}

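/*
 * A lookahead is simply the first 4 bytes of the next frame's HTC
 * header, which is why it can be cast to struct htc_frame_hdr above.
 * Hypothetical example: a lookahead stored as the bytes 02 00 2c 00
 * decodes as eid 2, flags 0 and payld_len 0x002c (44 bytes), so the
 * whole bundle is steered to endpoint 2.
 */
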
/*
 * Synchronously wait for a control message from the target. This
 * function is used at initialization time ONLY. At init, messages
 * on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}

int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}

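/*
 * Typical usage (a minimal sketch; allocation, field setup and error
 * handling elided, since the exact refill helpers vary by caller):
 *
 *	struct list_head queue;
 *
 *	INIT_LIST_HEAD(&queue);
 *	for (i = 0; i < n; i++) {
 *		packet = <allocate htc_packet, set buf/buf_len/endpoint>;
 *		list_add_tail(&packet->list, &queue);
 *	}
 *	ath6kl_htc_add_rxbuf_multiple(target, &queue);
 *
 * On success the packets are spliced onto the endpoint's rx_bufq and,
 * if reception was blocked waiting for buffers on that endpoint, the
 * receiver is unblocked.
 */
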
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}

int ath6kl_htc_conn_service(struct htc_target *target,
			    struct htc_service_connect_req *conn_req,
			    struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc connect service target 0x%p service id 0x%x\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
		    || (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_ep = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override cred_per_msg calculation, this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the Send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}

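/*
 * Example caller (a sketch modelled on how the driver's init path is
 * assumed to connect WMI services; the callback and queue depth are
 * illustrative, not prescribed values):
 *
 *	struct htc_service_connect_req connect;
 *	struct htc_service_connect_resp resp;
 *
 *	memset(&connect, 0, sizeof(connect));
 *	memset(&resp, 0, sizeof(resp));
 *	connect.svc_id = WMI_CONTROL_SVC;
 *	connect.ep_cb.rx = my_rx_handler;	(hypothetical callback)
 *	connect.max_txq_depth = 32;
 *	if (!ath6kl_htc_conn_service(target, &connect, &resp))
 *		ctrl_ep = resp.endpoint;	(endpoint assigned by target)
 */
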
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0,
		       sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	/* FIXME: free existing entries */
	INIT_LIST_HEAD(&target->cred_dist_list);
}

int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);
	return num;
}

static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc bundling allowed msg_per_bndl_max %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is
		 * not aligned to a block size; the I/O block
		 * padding will spill into the next credit buffer,
		 * which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}

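/*
 * Worked example of the alignment check above, with assumed but typical
 * numbers: tgt_cred_sz 256 and block_sz 128 give 256 % 128 == 0, so tx
 * bundling stays enabled. With tgt_cred_sz 260, 260 % 128 == 4 and the
 * block padding of each credit-sized message would spill into the next
 * credit buffer, so tx bundling is disabled.
 */
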
int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc target ready credits %d size %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc using protocol %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_conn_service((void *)target, &connect, &resp);

	if (status)
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}

/*
 * Start HTC, enable interrupts and let the target know
 * the host has finished setup.
 */
int ath6kl_htc_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	/* Disable interrupts at the chip level */
	ath6kl_hif_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
			   target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate setup completion to the target */
	status = htc_setup_tx_complete(target);

	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kl_hif_unmask_intrs(target->dev);

	if (status)
		ath6kl_htc_stop(target);

	return status;
}

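/*
 * Expected call order for the public HTC API in this file (a sketch of
 * how the core driver is assumed to drive it, error handling elided):
 *
 *	target = ath6kl_htc_create(ar);
 *	ath6kl_htc_wait_target(target);		(ready msg + ctrl endpoint)
 *	ath6kl_htc_conn_service(target, &connect, &resp);  (per service)
 *	ath6kl_htc_start(target);		(unmask irqs, init credits)
 *	...
 *	ath6kl_htc_stop(target);		(mask irqs, flush queues)
 *	ath6kl_htc_cleanup(target);
 */
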
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns, all pending HIF I/O has completed and we
	 * can safely flush the queues.
	 */
	ath6kl_hif_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_flush_rx_buf(target);

	reset_ep_state(target);
}

void *ath6kl_htc_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	struct htc_packet *packet;
	int status = 0, i = 0;
	u32 block_size, ctrl_bufsz;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_create_htc;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	reset_ep_state(target);

	status = ath6kl_hif_setup(target->dev);

	if (status)
		goto fail_create_htc;

	block_size = ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			break;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			break;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
	}

fail_create_htc:
	if (i != NUM_CONTROL_BUFFERS || status) {
		if (target) {
			ath6kl_htc_cleanup(target);
			target = NULL;
		}
	}

	return target;
}

/* cleanup the HTC instance */
void ath6kl_htc_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}