blob: 1385f719ad0778d2ad8b8ab732d9c46f4f511e8e [file] [log] [blame]
Kalle Valobdcd8172011-07-18 00:22:30 +03001/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
Vasanthakumar Thiagarajan1b2df402012-02-06 20:15:53 +05303 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
Kalle Valobdcd8172011-07-18 00:22:30 +03004 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "core.h"
Kalle Valo2e1cb232011-10-05 12:23:49 +030019#include "hif.h"
Kalle Valobdcd8172011-07-18 00:22:30 +030020#include "debug.h"
21#include "hif-ops.h"
22#include <asm/unaligned.h>
23
24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
25
Kalle Valof2f92192011-10-24 12:17:20 +030026/* Functions for Tx credit handling */
Kalle Valocb64a612011-10-24 12:17:28 +030027static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
28 struct htc_endpoint_credit_dist *ep_dist,
29 int credits)
Kalle Valof2f92192011-10-24 12:17:20 +030030{
Kalle Valo02f0d6f2011-10-24 12:17:59 +030031 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
32 ep_dist->endpoint, credits);
33
Kalle Valof2f92192011-10-24 12:17:20 +030034 ep_dist->credits += credits;
35 ep_dist->cred_assngd += credits;
36 cred_info->cur_free_credits -= credits;
37}
38
/*
 * Initial credit distribution across all endpoints.
 *
 * Pass 1: set each endpoint's minimum, seed the BE/BK data endpoints
 * (if the pool is big enough) and the control endpoint with their
 * minimum credits, and remember the lowest-priority data endpoint.
 * Pass 2: compute each endpoint's "normal" credit level from whatever
 * is left in the free pool.
 */
static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		/* endpoint 0 (HTC control) is not part of distribution */
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		/* floor: enough credits for one message */
		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				/* pre-seed best-effort/background data */
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
			/* this is the lowest priority data endpoint */
			/*
			 * FIXME: this looks fishy, check.  This copies the
			 * endpoint's embedded list_head by VALUE into
			 * lowestpri_ep_dist; later code iterates from that
			 * snapshot (see ath6kl_credit_seek).
			 */
			cred_info->lowestpri_ep_dist = cur_ep_dist->list;

		/*
		 * Streams have to be created (explicit | implicit) for all
		 * kinds of traffic. BE endpoints are also inactive in the
		 * beginning. When BE traffic starts it creates implicit
		 * streams that redistributes credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. credits will be distributed
		 * as traffic activity demands
		 */
	}

	/* the seeding above must not have exhausted the pool */
	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg are the same. We use a simple
			 * calculation here, we take the remaining credits
			 * and determine how many max messages this can
			 * cover and then set each endpoint's normal value
			 * equal to 3/4 this amount.
			 */
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;

		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}
128
129/* initialize and setup credit distribution */
Kalle Valocb64a612011-10-24 12:17:28 +0300130int ath6kl_credit_setup(void *htc_handle,
131 struct ath6kl_htc_credit_info *cred_info)
Kalle Valof2f92192011-10-24 12:17:20 +0300132{
133 u16 servicepriority[5];
134
135 memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
136
137 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
138 servicepriority[1] = WMI_DATA_VO_SVC;
139 servicepriority[2] = WMI_DATA_VI_SVC;
140 servicepriority[3] = WMI_DATA_BE_SVC;
141 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
142
143 /* set priority list */
144 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
145
146 return 0;
147}
148
149/* reduce an ep's credits back to a set limit */
Kalle Valocb64a612011-10-24 12:17:28 +0300150static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
151 struct htc_endpoint_credit_dist *ep_dist,
152 int limit)
Kalle Valof2f92192011-10-24 12:17:20 +0300153{
154 int credits;
155
Kalle Valo02f0d6f2011-10-24 12:17:59 +0300156 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
157 ep_dist->endpoint, limit);
158
Kalle Valof2f92192011-10-24 12:17:20 +0300159 ep_dist->cred_assngd = limit;
160
161 if (ep_dist->credits <= limit)
162 return;
163
164 credits = ep_dist->credits - limit;
165 ep_dist->credits -= credits;
166 cred_info->cur_free_credits += credits;
167}
168
/*
 * Fold pending returned credits (cred_to_dist) back into each endpoint
 * and trim any endpoint that now sits above its assigned or normal
 * level.  Idle inactive endpoints are stripped of all credits.
 * Endpoint 0 is skipped.
 */
static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
				 struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_dist_list;

	list_for_each_entry(cur_dist_list, epdist_list, list) {
		if (cur_dist_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_dist_list->cred_to_dist > 0) {
			/* absorb credits the target handed back */
			cur_dist_list->credits +=
				cur_dist_list->cred_to_dist;
			cur_dist_list->cred_to_dist = 0;

			/* never hold more than what was assigned ... */
			if (cur_dist_list->credits >
			    cur_dist_list->cred_assngd)
				ath6kl_credit_reduce(cred_info,
						     cur_dist_list,
						     cur_dist_list->cred_assngd);

			/* ... nor more than the normal working level */
			if (cur_dist_list->credits >
			    cur_dist_list->cred_norm)
				ath6kl_credit_reduce(cred_info, cur_dist_list,
						     cur_dist_list->cred_norm);

			/* inactive and idle: return everything */
			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_dist_list->txq_depth == 0)
					ath6kl_credit_reduce(cred_info,
							     cur_dist_list, 0);
			}
		}
	}
}
201
202/*
203 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
204 * question.
205 */
Kalle Valocb64a612011-10-24 12:17:28 +0300206static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
Kalle Valof2f92192011-10-24 12:17:20 +0300207 struct htc_endpoint_credit_dist *ep_dist)
208{
209 struct htc_endpoint_credit_dist *curdist_list;
210 int credits = 0;
211 int need;
212
213 if (ep_dist->svc_id == WMI_CONTROL_SVC)
214 goto out;
215
216 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
217 (ep_dist->svc_id == WMI_DATA_VO_SVC))
218 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
219 goto out;
220
221 /*
222 * For all other services, we follow a simple algorithm of:
223 *
224 * 1. checking the free pool for credits
225 * 2. checking lower priority endpoints for credits to take
226 */
227
228 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
229
230 if (credits >= ep_dist->seek_cred)
231 goto out;
232
233 /*
234 * We don't have enough in the free pool, try taking away from
235 * lower priority services The rule for taking away credits:
236 *
237 * 1. Only take from lower priority endpoints
238 * 2. Only take what is allocated above the minimum (never
239 * starve an endpoint completely)
240 * 3. Only take what you need.
241 */
242
243 list_for_each_entry_reverse(curdist_list,
244 &cred_info->lowestpri_ep_dist,
245 list) {
246 if (curdist_list == ep_dist)
247 break;
248
249 need = ep_dist->seek_cred - cred_info->cur_free_credits;
250
251 if ((curdist_list->cred_assngd - need) >=
252 curdist_list->cred_min) {
253 /*
254 * The current one has been allocated more than
255 * it's minimum and it has enough credits assigned
256 * above it's minimum to fulfill our need try to
257 * take away just enough to fulfill our need.
258 */
Kalle Valocb64a612011-10-24 12:17:28 +0300259 ath6kl_credit_reduce(cred_info, curdist_list,
260 curdist_list->cred_assngd - need);
Kalle Valof2f92192011-10-24 12:17:20 +0300261
262 if (cred_info->cur_free_credits >=
263 ep_dist->seek_cred)
264 break;
265 }
266
267 if (curdist_list->endpoint == ENDPOINT_0)
268 break;
269 }
270
271 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
272
273out:
274 /* did we find some credits? */
275 if (credits)
Kalle Valocb64a612011-10-24 12:17:28 +0300276 ath6kl_credit_deposit(cred_info, ep_dist, credits);
Kalle Valof2f92192011-10-24 12:17:20 +0300277
278 ep_dist->seek_cred = 0;
279}
280
281/* redistribute credits based on activity change */
Kalle Valocb64a612011-10-24 12:17:28 +0300282static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
283 struct list_head *ep_dist_list)
Kalle Valof2f92192011-10-24 12:17:20 +0300284{
285 struct htc_endpoint_credit_dist *curdist_list;
286
287 list_for_each_entry(curdist_list, ep_dist_list, list) {
288 if (curdist_list->endpoint == ENDPOINT_0)
289 continue;
290
291 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
292 (curdist_list->svc_id == WMI_DATA_BE_SVC))
293 curdist_list->dist_flags |= HTC_EP_ACTIVE;
294
295 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
296 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
297 if (curdist_list->txq_depth == 0)
Kalle Valocb64a612011-10-24 12:17:28 +0300298 ath6kl_credit_reduce(info, curdist_list, 0);
Kalle Valof2f92192011-10-24 12:17:20 +0300299 else
Kalle Valocb64a612011-10-24 12:17:28 +0300300 ath6kl_credit_reduce(info,
301 curdist_list,
302 curdist_list->cred_min);
Kalle Valof2f92192011-10-24 12:17:20 +0300303 }
304 }
305}
306
307/*
308 *
309 * This function is invoked whenever endpoints require credit
310 * distributions. A lock is held while this function is invoked, this
311 * function shall NOT block. The ep_dist_list is a list of distribution
312 * structures in prioritized order as defined by the call to the
313 * htc_set_credit_dist() api.
314 */
315static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
316 struct list_head *ep_dist_list,
317 enum htc_credit_dist_reason reason)
318{
319 switch (reason) {
320 case HTC_CREDIT_DIST_SEND_COMPLETE:
321 ath6kl_credit_update(cred_info, ep_dist_list);
322 break;
323 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
Kalle Valocb64a612011-10-24 12:17:28 +0300324 ath6kl_credit_redistribute(cred_info, ep_dist_list);
Kalle Valof2f92192011-10-24 12:17:20 +0300325 break;
326 default:
327 break;
328 }
329
330 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
331 WARN_ON(cred_info->cur_free_credits < 0);
332}
333
Kalle Valodfa01042011-09-06 11:10:49 +0300334static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
Vasanthakumar Thiagarajan94e532d2011-08-22 20:14:31 +0530335{
336 u8 *align_addr;
337
338 if (!IS_ALIGNED((unsigned long) *buf, 4)) {
339 align_addr = PTR_ALIGN(*buf - 4, 4);
340 memmove(align_addr, *buf, len);
341 *buf = align_addr;
342 }
343}
344
Kalle Valodfa01042011-09-06 11:10:49 +0300345static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
346 int ctrl0, int ctrl1)
Kalle Valobdcd8172011-07-18 00:22:30 +0300347{
348 struct htc_frame_hdr *hdr;
349
350 packet->buf -= HTC_HDR_LENGTH;
351 hdr = (struct htc_frame_hdr *)packet->buf;
352
353 /* Endianess? */
354 put_unaligned((u16)packet->act_len, &hdr->payld_len);
355 hdr->flags = flags;
356 hdr->eid = packet->endpoint;
357 hdr->ctrl[0] = ctrl0;
358 hdr->ctrl[1] = ctrl1;
359}
360
/* Return a control TX buffer to the free list (htc_lock protects it). */
static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}
368
369static struct htc_packet *htc_get_control_buf(struct htc_target *target,
370 bool tx)
371{
372 struct htc_packet *packet = NULL;
373 struct list_head *buf_list;
374
375 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
376
377 spin_lock_bh(&target->htc_lock);
378
379 if (list_empty(buf_list)) {
380 spin_unlock_bh(&target->htc_lock);
381 return NULL;
382 }
383
384 packet = list_first_entry(buf_list, struct htc_packet, list);
385 list_del(&packet->list);
386 spin_unlock_bh(&target->htc_lock);
387
388 if (tx)
389 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
390
391 return packet;
392}
393
/*
 * Per-packet completion bookkeeping: strip the HTC header offset and,
 * on failure, reclaim the packet's credits and re-run credit
 * distribution.  Takes tx_lock only on the failure path.
 */
static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	/* undo the header reservation made at prep time */
	packet->buf += HTC_HDR_LENGTH;

	/* nothing more to do on success */
	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}
423
424static void htc_tx_complete(struct htc_endpoint *endpoint,
425 struct list_head *txq)
426{
427 if (list_empty(txq))
428 return;
429
Kalle Valoebf29c92011-10-13 15:21:15 +0300430 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +0300431 "htc tx complete ep %d pkts %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +0300432 endpoint->eid, get_queue_depth(txq));
433
434 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
435}
436
437static void htc_tx_comp_handler(struct htc_target *target,
438 struct htc_packet *packet)
439{
440 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
441 struct list_head container;
442
Kalle Valo2387f0d2011-10-30 21:16:49 +0200443 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
444 packet->info.tx.seqno);
445
Kalle Valobdcd8172011-07-18 00:22:30 +0300446 htc_tx_comp_update(target, endpoint, packet);
447 INIT_LIST_HEAD(&container);
448 list_add_tail(&packet->list, &container);
449 /* do completion */
450 htc_tx_complete(endpoint, &container);
451}
452
Vasanthakumar Thiagarajane041c7f2011-07-16 20:29:09 +0530453static void htc_async_tx_scat_complete(struct htc_target *target,
454 struct hif_scatter_req *scat_req)
Kalle Valobdcd8172011-07-18 00:22:30 +0300455{
Vasanthakumar Thiagarajane041c7f2011-07-16 20:29:09 +0530456 struct htc_endpoint *endpoint;
Kalle Valobdcd8172011-07-18 00:22:30 +0300457 struct htc_packet *packet;
458 struct list_head tx_compq;
459 int i;
460
461 INIT_LIST_HEAD(&tx_compq);
462
Kalle Valoebf29c92011-10-13 15:21:15 +0300463 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +0300464 "htc tx scat complete len %d entries %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +0300465 scat_req->len, scat_req->scat_entries);
466
467 if (scat_req->status)
468 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
469
Vasanthakumar Thiagarajane041c7f2011-07-16 20:29:09 +0530470 packet = scat_req->scat_list[0].packet;
471 endpoint = &target->endpoint[packet->endpoint];
472
Kalle Valobdcd8172011-07-18 00:22:30 +0300473 /* walk through the scatter list and process */
474 for (i = 0; i < scat_req->scat_entries; i++) {
475 packet = scat_req->scat_list[i].packet;
476 if (!packet) {
477 WARN_ON(1);
478 return;
479 }
480
481 packet->status = scat_req->status;
482 htc_tx_comp_update(target, endpoint, packet);
483 list_add_tail(&packet->list, &tx_compq);
484 }
485
486 /* free scatter request */
487 hif_scatter_req_add(target->dev->ar, scat_req);
488
489 /* complete all packets */
490 htc_tx_complete(endpoint, &tx_compq);
491}
492
Kalle Valodfa01042011-09-06 11:10:49 +0300493static int ath6kl_htc_tx_issue(struct htc_target *target,
494 struct htc_packet *packet)
Kalle Valobdcd8172011-07-18 00:22:30 +0300495{
496 int status;
497 bool sync = false;
498 u32 padded_len, send_len;
499
500 if (!packet->completion)
501 sync = true;
502
503 send_len = packet->act_len + HTC_HDR_LENGTH;
504
Vasanthakumar Thiagarajan5be88242011-07-18 14:23:28 +0530505 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
Kalle Valobdcd8172011-07-18 00:22:30 +0300506
Kalle Valoebf29c92011-10-13 15:21:15 +0300507 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo2387f0d2011-10-30 21:16:49 +0200508 "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
509 send_len, packet->info.tx.seqno, padded_len,
Kalle Valo471e92f2011-10-13 15:21:37 +0300510 target->dev->ar->mbox_info.htc_addr,
511 sync ? "sync" : "async");
Kalle Valobdcd8172011-07-18 00:22:30 +0300512
513 if (sync) {
514 status = hif_read_write_sync(target->dev->ar,
515 target->dev->ar->mbox_info.htc_addr,
516 packet->buf, padded_len,
517 HIF_WR_SYNC_BLOCK_INC);
518
519 packet->status = status;
Kalle Valo65d2bb12011-08-14 18:10:03 -0700520 packet->buf += HTC_HDR_LENGTH;
Kalle Valobdcd8172011-07-18 00:22:30 +0300521 } else
522 status = hif_write_async(target->dev->ar,
523 target->dev->ar->mbox_info.htc_addr,
524 packet->buf, padded_len,
525 HIF_WR_ASYNC_BLOCK_INC, packet);
526
527 return status;
528}
529
/*
 * Check whether endpoint @ep has enough credits to send @len bytes,
 * seeking more from the distributor if needed.  On success the credits
 * are consumed, *req_cred holds the number used, and
 * HTC_FLAGS_NEED_CREDIT_UPDATE may be OR'd into *flags when credits
 * are running low.  Returns 0 on success, -EINVAL when credits cannot
 * be obtained (always fatal for endpoint 0, which never seeks).
 */
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{

	/* one credit per tgt_cred_sz bytes, minimum one */
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}
583
Kalle Valodfa01042011-09-06 11:10:49 +0300584static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
585 struct htc_endpoint *endpoint,
586 struct list_head *queue)
Kalle Valobdcd8172011-07-18 00:22:30 +0300587{
588 int req_cred;
589 u8 flags;
590 struct htc_packet *packet;
591 unsigned int len;
592
593 while (true) {
594
595 flags = 0;
596
597 if (list_empty(&endpoint->txq))
598 break;
599 packet = list_first_entry(&endpoint->txq, struct htc_packet,
600 list);
601
Kalle Valoebf29c92011-10-13 15:21:15 +0300602 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +0300603 "htc tx got packet 0x%p queue depth %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +0300604 packet, get_queue_depth(&endpoint->txq));
605
Vasanthakumar Thiagarajan5be88242011-07-18 14:23:28 +0530606 len = CALC_TXRX_PADDED_LEN(target,
Kalle Valobdcd8172011-07-18 00:22:30 +0300607 packet->act_len + HTC_HDR_LENGTH);
608
609 if (htc_check_credits(target, endpoint, &flags,
610 packet->endpoint, len, &req_cred))
611 break;
612
613 /* now we can fully move onto caller's queue */
614 packet = list_first_entry(&endpoint->txq, struct htc_packet,
615 list);
616 list_move_tail(&packet->list, queue);
617
618 /* save the number of credits this packet consumed */
619 packet->info.tx.cred_used = req_cred;
620
621 /* all TX packets are handled asynchronously */
622 packet->completion = htc_tx_comp_handler;
623 packet->context = target;
624 endpoint->ep_st.tx_issued += 1;
625
626 /* save send flags */
627 packet->info.tx.flags = flags;
628 packet->info.tx.seqno = endpoint->seqno;
629 endpoint->seqno++;
630 }
631}
632
633/* See if the padded tx length falls on a credit boundary */
634static int htc_get_credit_padding(unsigned int cred_sz, int *len,
635 struct htc_endpoint *ep)
636{
637 int rem_cred, cred_pad;
638
639 rem_cred = *len % cred_sz;
640
641 /* No padding needed */
642 if (!rem_cred)
643 return 0;
644
645 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
646 return -1;
647
648 /*
649 * The transfer consumes a "partial" credit, this
650 * packet cannot be bundled unless we add
651 * additional "dummy" padding (max 255 bytes) to
652 * consume the entire credit.
653 */
654 cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
655
656 if ((cred_pad > 0) && (cred_pad <= 255))
657 *len += cred_pad;
658 else
659 /* The amount of padding is too large, send as non-bundled */
660 return -1;
661
662 return cred_pad;
663}
664
Kalle Valodfa01042011-09-06 11:10:49 +0300665static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
666 struct htc_endpoint *endpoint,
667 struct hif_scatter_req *scat_req,
668 int n_scat,
669 struct list_head *queue)
Kalle Valobdcd8172011-07-18 00:22:30 +0300670{
671 struct htc_packet *packet;
672 int i, len, rem_scat, cred_pad;
673 int status = 0;
674
Vasanthakumar Thiagarajan3b2f5e52011-07-18 14:23:27 +0530675 rem_scat = target->max_tx_bndl_sz;
Kalle Valobdcd8172011-07-18 00:22:30 +0300676
677 for (i = 0; i < n_scat; i++) {
678 scat_req->scat_list[i].packet = NULL;
679
680 if (list_empty(queue))
681 break;
682
683 packet = list_first_entry(queue, struct htc_packet, list);
Vasanthakumar Thiagarajan5be88242011-07-18 14:23:28 +0530684 len = CALC_TXRX_PADDED_LEN(target,
Kalle Valobdcd8172011-07-18 00:22:30 +0300685 packet->act_len + HTC_HDR_LENGTH);
686
687 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
688 &len, endpoint);
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530689 if (cred_pad < 0 || rem_scat < len) {
Kalle Valobdcd8172011-07-18 00:22:30 +0300690 status = -ENOSPC;
691 break;
692 }
693
694 rem_scat -= len;
695 /* now remove it from the queue */
Kalle Valobdcd8172011-07-18 00:22:30 +0300696 list_del(&packet->list);
697
698 scat_req->scat_list[i].packet = packet;
699 /* prepare packet and flag message as part of a send bundle */
Kalle Valodfa01042011-09-06 11:10:49 +0300700 ath6kl_htc_tx_prep_pkt(packet,
Kalle Valobdcd8172011-07-18 00:22:30 +0300701 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
702 cred_pad, packet->info.tx.seqno);
Vasanthakumar Thiagarajan94e532d2011-08-22 20:14:31 +0530703 /* Make sure the buffer is 4-byte aligned */
Kalle Valodfa01042011-09-06 11:10:49 +0300704 ath6kl_htc_tx_buf_align(&packet->buf,
705 packet->act_len + HTC_HDR_LENGTH);
Kalle Valobdcd8172011-07-18 00:22:30 +0300706 scat_req->scat_list[i].buf = packet->buf;
707 scat_req->scat_list[i].len = len;
708
709 scat_req->len += len;
710 scat_req->scat_entries++;
Kalle Valoebf29c92011-10-13 15:21:15 +0300711 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo2387f0d2011-10-30 21:16:49 +0200712 "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
713 i, packet, packet->info.tx.seqno, len, rem_scat);
Kalle Valobdcd8172011-07-18 00:22:30 +0300714 }
715
716 /* Roll back scatter setup in case of any failure */
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530717 if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
Kalle Valobdcd8172011-07-18 00:22:30 +0300718 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
719 packet = scat_req->scat_list[i].packet;
720 if (packet) {
721 packet->buf += HTC_HDR_LENGTH;
722 list_add(&packet->list, queue);
723 }
724 }
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530725 return -EAGAIN;
Kalle Valobdcd8172011-07-18 00:22:30 +0300726 }
727
Vasanthakumar Thiagarajanf7a7e7a2011-08-22 20:40:22 +0530728 return status;
Kalle Valobdcd8172011-07-18 00:22:30 +0300729}
730
/*
 * Drain a queue and send as bundles this function may return without fully
 * draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 *
 * On return *sent_bundle and *n_bundle_pkts hold the number of bundles
 * and total packets submitted.
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			/* setup rolled back; return the unused request */
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		/* -ENOSPC from setup: stop after submitting what we had */
		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}
802
Kalle Valodfa01042011-09-06 11:10:49 +0300803static void ath6kl_htc_tx_from_queue(struct htc_target *target,
804 struct htc_endpoint *endpoint)
Kalle Valobdcd8172011-07-18 00:22:30 +0300805{
806 struct list_head txq;
807 struct htc_packet *packet;
808 int bundle_sent;
809 int n_pkts_bundle;
810
811 spin_lock_bh(&target->tx_lock);
812
813 endpoint->tx_proc_cnt++;
814 if (endpoint->tx_proc_cnt > 1) {
815 endpoint->tx_proc_cnt--;
816 spin_unlock_bh(&target->tx_lock);
Kalle Valo471e92f2011-10-13 15:21:37 +0300817 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
Kalle Valobdcd8172011-07-18 00:22:30 +0300818 return;
819 }
820
821 /*
822 * drain the endpoint TX queue for transmission as long
823 * as we have enough credits.
824 */
825 INIT_LIST_HEAD(&txq);
826
827 while (true) {
828
829 if (list_empty(&endpoint->txq))
830 break;
831
Kalle Valodfa01042011-09-06 11:10:49 +0300832 ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
Kalle Valobdcd8172011-07-18 00:22:30 +0300833
834 if (list_empty(&txq))
835 break;
836
837 spin_unlock_bh(&target->tx_lock);
838
839 bundle_sent = 0;
840 n_pkts_bundle = 0;
841
842 while (true) {
843 /* try to send a bundle on each pass */
844 if ((target->tx_bndl_enable) &&
845 (get_queue_depth(&txq) >=
846 HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
847 int temp1 = 0, temp2 = 0;
848
Kalle Valodfa01042011-09-06 11:10:49 +0300849 ath6kl_htc_tx_bundle(endpoint, &txq,
850 &temp1, &temp2);
Kalle Valobdcd8172011-07-18 00:22:30 +0300851 bundle_sent += temp1;
852 n_pkts_bundle += temp2;
853 }
854
855 if (list_empty(&txq))
856 break;
857
858 packet = list_first_entry(&txq, struct htc_packet,
859 list);
860 list_del(&packet->list);
861
Kalle Valodfa01042011-09-06 11:10:49 +0300862 ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
863 0, packet->info.tx.seqno);
864 ath6kl_htc_tx_issue(target, packet);
Kalle Valobdcd8172011-07-18 00:22:30 +0300865 }
866
867 spin_lock_bh(&target->tx_lock);
868
869 endpoint->ep_st.tx_bundles += bundle_sent;
870 endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
871 }
872
873 endpoint->tx_proc_cnt = 0;
874 spin_unlock_bh(&target->tx_lock);
875}
876
Kalle Valodfa01042011-09-06 11:10:49 +0300877static bool ath6kl_htc_tx_try(struct htc_target *target,
878 struct htc_endpoint *endpoint,
879 struct htc_packet *tx_pkt)
Kalle Valobdcd8172011-07-18 00:22:30 +0300880{
881 struct htc_ep_callbacks ep_cb;
882 int txq_depth;
883 bool overflow = false;
884
885 ep_cb = endpoint->ep_cb;
886
887 spin_lock_bh(&target->tx_lock);
888 txq_depth = get_queue_depth(&endpoint->txq);
889 spin_unlock_bh(&target->tx_lock);
890
891 if (txq_depth >= endpoint->max_txq_depth)
892 overflow = true;
893
894 if (overflow)
Kalle Valoebf29c92011-10-13 15:21:15 +0300895 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +0300896 "htc tx overflow ep %d depth %d max %d\n",
897 endpoint->eid, txq_depth,
Kalle Valobdcd8172011-07-18 00:22:30 +0300898 endpoint->max_txq_depth);
899
900 if (overflow && ep_cb.tx_full) {
Kalle Valobdcd8172011-07-18 00:22:30 +0300901 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
902 HTC_SEND_FULL_DROP) {
903 endpoint->ep_st.tx_dropped += 1;
904 return false;
905 }
906 }
907
908 spin_lock_bh(&target->tx_lock);
909 list_add_tail(&tx_pkt->list, &endpoint->txq);
910 spin_unlock_bh(&target->tx_lock);
911
Kalle Valodfa01042011-09-06 11:10:49 +0300912 ath6kl_htc_tx_from_queue(target, endpoint);
Kalle Valobdcd8172011-07-18 00:22:30 +0300913
914 return true;
915}
916
/*
 * Walk the (static, priority-ordered) credit distribution list and
 * restart transmission on any endpoint that still has packets queued,
 * e.g. after credits were returned by the target. tx_lock is taken
 * only around each per-endpoint queue inspection.
 */
static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			/* drop the lock: tx_from_queue takes it itself */
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}
952
953static int htc_setup_tx_complete(struct htc_target *target)
954{
955 struct htc_packet *send_pkt = NULL;
956 int status;
957
958 send_pkt = htc_get_control_buf(target, true);
959
960 if (!send_pkt)
961 return -ENOMEM;
962
963 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
964 struct htc_setup_comp_ext_msg *setup_comp_ext;
965 u32 flags = 0;
966
967 setup_comp_ext =
968 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
969 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
970 setup_comp_ext->msg_id =
971 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
972
973 if (target->msg_per_bndl_max > 0) {
974 /* Indicate HTC bundling to the target */
975 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
976 setup_comp_ext->msg_per_rxbndl =
977 target->msg_per_bndl_max;
978 }
979
980 memcpy(&setup_comp_ext->flags, &flags,
981 sizeof(setup_comp_ext->flags));
982 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
983 sizeof(struct htc_setup_comp_ext_msg),
984 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
985
986 } else {
987 struct htc_setup_comp_msg *setup_comp;
988 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
989 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
990 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
991 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
992 sizeof(struct htc_setup_comp_msg),
993 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
994 }
995
996 /* we want synchronous operation */
997 send_pkt->completion = NULL;
Kalle Valodfa01042011-09-06 11:10:49 +0300998 ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
999 status = ath6kl_htc_tx_issue(target, send_pkt);
Kalle Valobdcd8172011-07-18 00:22:30 +03001000
1001 if (send_pkt != NULL)
1002 htc_reclaim_txctrl_buf(target, send_pkt);
1003
1004 return status;
1005}
1006
Kalle Vaload226ec2011-08-10 09:49:12 +03001007void ath6kl_htc_set_credit_dist(struct htc_target *target,
Kalle Valo3c370392011-10-24 12:17:12 +03001008 struct ath6kl_htc_credit_info *credit_info,
Kalle Vaload226ec2011-08-10 09:49:12 +03001009 u16 srvc_pri_order[], int list_len)
Kalle Valobdcd8172011-07-18 00:22:30 +03001010{
1011 struct htc_endpoint *endpoint;
1012 int i, ep;
1013
Kalle Valo3c370392011-10-24 12:17:12 +03001014 target->credit_info = credit_info;
Kalle Valobdcd8172011-07-18 00:22:30 +03001015
1016 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
1017 &target->cred_dist_list);
1018
1019 for (i = 0; i < list_len; i++) {
1020 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
1021 endpoint = &target->endpoint[ep];
1022 if (endpoint->svc_id == srvc_pri_order[i]) {
1023 list_add_tail(&endpoint->cred_dist.list,
1024 &target->cred_dist_list);
1025 break;
1026 }
1027 }
1028 if (ep >= ENDPOINT_MAX) {
1029 WARN_ON(1);
1030 return;
1031 }
1032 }
1033}
1034
Kalle Vaload226ec2011-08-10 09:49:12 +03001035int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
Kalle Valobdcd8172011-07-18 00:22:30 +03001036{
1037 struct htc_endpoint *endpoint;
1038 struct list_head queue;
1039
Kalle Valoebf29c92011-10-13 15:21:15 +03001040 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +03001041 "htc tx ep id %d buf 0x%p len %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +03001042 packet->endpoint, packet->buf, packet->act_len);
1043
1044 if (packet->endpoint >= ENDPOINT_MAX) {
1045 WARN_ON(1);
1046 return -EINVAL;
1047 }
1048
1049 endpoint = &target->endpoint[packet->endpoint];
1050
Kalle Valodfa01042011-09-06 11:10:49 +03001051 if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
Kalle Valobdcd8172011-07-18 00:22:30 +03001052 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
1053 -ECANCELED : -ENOSPC;
1054 INIT_LIST_HEAD(&queue);
1055 list_add(&packet->list, &queue);
1056 htc_tx_complete(endpoint, &queue);
1057 }
1058
1059 return 0;
1060}
1061
1062/* flush endpoint TX queue */
Kalle Vaload226ec2011-08-10 09:49:12 +03001063void ath6kl_htc_flush_txep(struct htc_target *target,
1064 enum htc_endpoint_id eid, u16 tag)
Kalle Valobdcd8172011-07-18 00:22:30 +03001065{
1066 struct htc_packet *packet, *tmp_pkt;
1067 struct list_head discard_q, container;
1068 struct htc_endpoint *endpoint = &target->endpoint[eid];
1069
1070 if (!endpoint->svc_id) {
1071 WARN_ON(1);
1072 return;
1073 }
1074
1075 /* initialize the discard queue */
1076 INIT_LIST_HEAD(&discard_q);
1077
1078 spin_lock_bh(&target->tx_lock);
1079
1080 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
1081 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
1082 (tag == packet->info.tx.tag))
1083 list_move_tail(&packet->list, &discard_q);
1084 }
1085
1086 spin_unlock_bh(&target->tx_lock);
1087
1088 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
1089 packet->status = -ECANCELED;
1090 list_del(&packet->list);
Kalle Valoebf29c92011-10-13 15:21:15 +03001091 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +03001092 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
Kalle Valobdcd8172011-07-18 00:22:30 +03001093 packet, packet->act_len,
1094 packet->endpoint, packet->info.tx.tag);
1095
1096 INIT_LIST_HEAD(&container);
1097 list_add_tail(&packet->list, &container);
1098 htc_tx_complete(endpoint, &container);
1099 }
1100
1101}
1102
Kalle Vaload226ec2011-08-10 09:49:12 +03001103static void ath6kl_htc_flush_txep_all(struct htc_target *target)
Kalle Valobdcd8172011-07-18 00:22:30 +03001104{
1105 struct htc_endpoint *endpoint;
1106 int i;
1107
1108 dump_cred_dist_stats(target);
1109
1110 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1111 endpoint = &target->endpoint[i];
1112 if (endpoint->svc_id == 0)
1113 /* not in use.. */
1114 continue;
Kalle Vaload226ec2011-08-10 09:49:12 +03001115 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
Kalle Valobdcd8172011-07-18 00:22:30 +03001116 }
1117}
1118
/*
 * Mark an endpoint active or inactive for credit distribution. When
 * the HTC_EP_ACTIVE flag actually changes, a credit redistribution is
 * triggered under tx_lock; if an endpoint went inactive, stalled
 * queues are re-checked afterwards (credits may have been freed up
 * for other endpoints).
 */
void ath6kl_htc_indicate_activity_change(struct htc_target *target,
					 enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	/* only redistribute when the active state actually flips */
	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		/* give the distribution function a current queue depth */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	/* deactivation may have released credits; restart stalled queues */
	if (dist && !active)
		htc_chk_ep_txq(target);
}
1162
1163/* HTC Rx */
1164
Kalle Valo689def92011-09-06 11:10:49 +03001165static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
1166 int n_look_ahds)
Kalle Valobdcd8172011-07-18 00:22:30 +03001167{
1168 endpoint->ep_st.rx_pkts++;
1169 if (n_look_ahds == 1)
1170 endpoint->ep_st.rx_lkahds++;
1171 else if (n_look_ahds > 1)
1172 endpoint->ep_st.rx_bundle_lkahd++;
1173}
1174
1175static inline bool htc_valid_rx_frame_len(struct htc_target *target,
1176 enum htc_endpoint_id eid, int len)
1177{
1178 return (eid == target->dev->ar->ctrl_ep) ?
1179 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
1180}
1181
1182static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1183{
1184 struct list_head queue;
1185
1186 INIT_LIST_HEAD(&queue);
1187 list_add_tail(&packet->list, &queue);
Kalle Vaload226ec2011-08-10 09:49:12 +03001188 return ath6kl_htc_add_rxbuf_multiple(target, &queue);
Kalle Valobdcd8172011-07-18 00:22:30 +03001189}
1190
1191static void htc_reclaim_rxbuf(struct htc_target *target,
1192 struct htc_packet *packet,
1193 struct htc_endpoint *ep)
1194{
1195 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
1196 htc_rxpkt_reset(packet);
1197 packet->status = -ECANCELED;
1198 ep->ep_cb.rx(ep->target, packet);
1199 } else {
1200 htc_rxpkt_reset(packet);
1201 htc_add_rxbuf((void *)(target), packet);
1202 }
1203}
1204
/* Return a control-endpoint rx buffer to the free pool (htc_lock held
 * only for the list manipulation).
 */
static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}
1212
/*
 * Fetch one packet of rx_len bytes from the target's HTC mailbox into
 * packet->buf using a synchronous, fixed-address block read. rx_len
 * is first padded up to the device block size and must still fit in
 * the packet's buffer.
 *
 * Returns 0 on success, -ENOMEM if the padded length exceeds the
 * buffer, or the HIF error; the result is also stored in
 * packet->status.
 */
static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	/* block-aligned length required by the fixed-address read */
	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr);

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}
1243
1244/*
1245 * optimization for recv packets, we can indicate a
1246 * "hint" that there are more single-packets to fetch
1247 * on this endpoint.
1248 */
Kalle Valo689def92011-09-06 11:10:49 +03001249static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
1250 struct htc_endpoint *endpoint,
1251 struct htc_packet *packet)
Kalle Valobdcd8172011-07-18 00:22:30 +03001252{
1253 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
1254
1255 if (htc_hdr->eid == packet->endpoint) {
1256 if (!list_empty(&endpoint->rx_bufq))
1257 packet->info.rx.indicat_flags |=
1258 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1259 }
1260}
1261
Kalle Valo689def92011-09-06 11:10:49 +03001262static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
Kalle Valobdcd8172011-07-18 00:22:30 +03001263{
1264 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
1265
1266 if (ep_cb.rx_refill_thresh > 0) {
1267 spin_lock_bh(&endpoint->target->rx_lock);
1268 if (get_queue_depth(&endpoint->rx_bufq)
1269 < ep_cb.rx_refill_thresh) {
1270 spin_unlock_bh(&endpoint->target->rx_lock);
1271 ep_cb.rx_refill(endpoint->target, endpoint->eid);
1272 return;
1273 }
1274 spin_unlock_bh(&endpoint->target->rx_lock);
1275 }
1276}
1277
/*
 * Allocate/claim rx buffers for the n_msg messages announced by one
 * lookahead and append them to 'queue'. Buffers come either from the
 * endpoint's rx_alloc callback (large frames over rx_alloc_thresh,
 * flagged NO_RECYCLE) or from the endpoint's rx_bufq, optionally
 * refilled via rx_refill. rx_lock is dropped around both callbacks
 * and re-taken afterwards.
 *
 * Returns 0 on success, -EINVAL for a bad frame length, -ENOSPC when
 * no buffer could be obtained (records the endpoint in ep_waiting),
 * or -ECANCELED when HTC is stopping. On any failure the caller owns
 * (and must reclaim) whatever was already placed on 'queue'.
 *
 * This function is called with rx_lock held.
 */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	/* header + payload, padded to the device block size */
	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup,they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			/* callback may sleep/allocate: drop rx_lock */
			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					/* refill runs without rx_lock */
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			/* remember who is starving so a later buffer
			 * delivery can resume reception */
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			/* bundled follow-ups: real header read later */
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}
1380
/*
 * Validate each of 'msg' lookaheads and allocate rx buffers for the
 * packets they announce, appending them to 'queue'. A lookahead whose
 * header carries HTC_FLG_RX_BNDL_CNT announces a bundle of equal
 * padded-length frames; otherwise it announces a single frame.
 *
 * Runs under rx_lock (dropped internally by ath6kl_htc_rx_setup()
 * around endpoint callbacks). -ENOSPC from setup is converted to
 * success so that the buffers already queued can receive partial
 * data. On any other failure all queued buffers are reclaimed before
 * the error is returned.
 */
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		/* sanity-check the endpoint advertised by the target */
		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailabilty of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	/* on failure, give back every buffer already queued */
	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
1479
/*
 * Rx callback for the control endpoint (endpoint 0). After HTC setup
 * no unsolicited messages are expected there, so any packet with a
 * non-zero payload is logged as unexpected. Cancelled packets go back
 * to the control-buffer pool; everything else is reclaimed via the
 * normal rx path.
 */
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}
1504
1505static void htc_proc_cred_rpt(struct htc_target *target,
1506 struct htc_credit_report *rpt,
1507 int n_entries,
1508 enum htc_endpoint_id from_ep)
1509{
1510 struct htc_endpoint *endpoint;
1511 int tot_credits = 0, i;
1512 bool dist = false;
1513
Kalle Valobdcd8172011-07-18 00:22:30 +03001514 spin_lock_bh(&target->tx_lock);
1515
1516 for (i = 0; i < n_entries; i++, rpt++) {
1517 if (rpt->eid >= ENDPOINT_MAX) {
1518 WARN_ON(1);
1519 spin_unlock_bh(&target->tx_lock);
1520 return;
1521 }
1522
1523 endpoint = &target->endpoint[rpt->eid];
1524
Kalle Valo02f0d6f2011-10-24 12:17:59 +03001525 ath6kl_dbg(ATH6KL_DBG_CREDIT,
1526 "credit report ep %d credits %d\n",
Kalle Valo471e92f2011-10-13 15:21:37 +03001527 rpt->eid, rpt->credits);
Kalle Valobdcd8172011-07-18 00:22:30 +03001528
1529 endpoint->ep_st.tx_cred_rpt += 1;
1530 endpoint->ep_st.cred_retnd += rpt->credits;
1531
1532 if (from_ep == rpt->eid) {
1533 /*
1534 * This credit report arrived on the same endpoint
1535 * indicating it arrived in an RX packet.
1536 */
1537 endpoint->ep_st.cred_from_rx += rpt->credits;
1538 endpoint->ep_st.cred_rpt_from_rx += 1;
1539 } else if (from_ep == ENDPOINT_0) {
1540 /* credit arrived on endpoint 0 as a NULL message */
1541 endpoint->ep_st.cred_from_ep0 += rpt->credits;
1542 endpoint->ep_st.cred_rpt_ep0 += 1;
1543 } else {
1544 endpoint->ep_st.cred_from_other += rpt->credits;
1545 endpoint->ep_st.cred_rpt_from_other += 1;
1546 }
1547
Raja Mani5ba3ee42011-07-19 19:27:31 +05301548 if (rpt->eid == ENDPOINT_0)
Kalle Valobdcd8172011-07-18 00:22:30 +03001549 /* always give endpoint 0 credits back */
1550 endpoint->cred_dist.credits += rpt->credits;
1551 else {
1552 endpoint->cred_dist.cred_to_dist += rpt->credits;
1553 dist = true;
1554 }
1555
1556 /*
1557 * Refresh tx depth for distribution function that will
1558 * recover these credits NOTE: this is only valid when
1559 * there are credits to recover!
1560 */
1561 endpoint->cred_dist.txq_depth =
1562 get_queue_depth(&endpoint->txq);
1563
1564 tot_credits += rpt->credits;
1565 }
1566
Kalle Valobdcd8172011-07-18 00:22:30 +03001567 if (dist) {
1568 /*
1569 * This was a credit return based on a completed send
1570 * operations note, this is done with the lock held
1571 */
Kalle Valo3c370392011-10-24 12:17:12 +03001572 ath6kl_credit_distribute(target->credit_info,
Kalle Valofa99e962011-10-24 12:16:55 +03001573 &target->cred_dist_list,
1574 HTC_CREDIT_DIST_SEND_COMPLETE);
Kalle Valobdcd8172011-07-18 00:22:30 +03001575 }
1576
1577 spin_unlock_bh(&target->tx_lock);
1578
1579 if (tot_credits)
1580 htc_chk_ep_txq(target);
1581}
1582
/*
 * Dispatch a single trailer record found in an rx frame. Credit
 * records are forwarded to htc_proc_cred_rpt(); lookahead records
 * (single or bundled) copy their 4-byte lookahead value(s) into
 * next_lk_ahds and set *n_lk_ahds, provided next_lk_ahds is non-NULL
 * and (for single lookaheads) the pre/post validation bytes match.
 * Unknown record ids are logged and skipped.
 *
 * Returns 0, or -EINVAL when a record's length is inconsistent with
 * its type.
 */
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		/* pre_valid must be the bitwise complement of post_valid */
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
					"htc rx next look ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
					"", record_buf, record->len);

			/* one 4-byte lookahead per bundled report entry */
			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;

}
1664
/*
 * Walk the trailer area at the end of an rx frame, parsing each
 * byte-aligned {htc_record_hdr, payload} record in turn via
 * htc_parse_trailer(). Lookahead results are written through
 * next_lk_ahds/n_lk_ahds.
 *
 * Returns 0 on success; -ENOMEM for a truncated/overrunning record,
 * or the error from htc_parse_trailer(). On failure the whole
 * original trailer is dumped for debugging.
 */
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

	/* remember the full span for the error dump below */
	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		/* record payload must fit in what remains */
		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}
1718
/*
 * Validate and strip the HTC header of a received packet. For packets
 * fetched blind as part of a bundle (REFRESH_HDR), the expected header
 * and actual length are first refreshed from the real on-wire header.
 * The first 4 bytes must match the lookahead previously announced by
 * the target; if the header flags a trailer, it is parsed (and may
 * yield next lookaheads unless the packet is flagged to ignore them)
 * and removed from act_len. On success packet->buf/act_len are
 * advanced past the HTC header.
 *
 * Returns 0 on success or -ENOMEM for any validation failure (the bad
 * packet is dumped for debugging).
 */
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	/* first 4 bytes of the frame double as its lookahead value */
	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed  */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	/* header must match the lookahead the target announced */
	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		/* ctrl[0] carries the trailer length */
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		/* trailer sits at the very end of the payload */
		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	/* strip the HTC header before handing the packet upwards */
	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}
1815
/* Deliver a fully processed rx packet to the endpoint's registered
 * rx callback.
 */
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}
1824
/*
 * Fetch several RX packets from the target in a single HIF scatter request.
 *
 * Up to target->msg_per_bndl_max packets are moved from @rxq into the
 * scatter request (one packet per scatter entry, each padded to the block
 * size) and received in one transfer.  Fetched packets are appended to
 * @sync_compq and flagged HTC_RX_PKT_PART_OF_BUNDLE; on success
 * *n_pkt_fetched is set to the number of scatter entries submitted.
 *
 * If no scatter request is available, returns 0 without fetching anything
 * (*n_pkt_fetched untouched) so the caller can fall back to single-packet
 * synchronous reads.
 */
static int ath6kl_htc_rx_bundle(struct htc_target *target,
                                struct list_head *rxq,
                                struct list_head *sync_compq,
                                int *n_pkt_fetched, bool part_bundle)
{
        struct hif_scatter_req *scat_req;
        struct htc_packet *packet;
        int rem_space = target->max_rx_bndl_sz;
        int n_scat_pkt, status = 0, i, len;

        n_scat_pkt = get_queue_depth(rxq);
        n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

        if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
                /*
                 * We were forced to split this bundle receive operation
                 * all packets in this partial bundle must have their
                 * lookaheads ignored.
                 */
                part_bundle = true;

                /*
                 * This would only happen if the target ignored our max
                 * bundle limit.
                 */
                ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
                            __func__, get_queue_depth(rxq), n_scat_pkt);
        }

        len = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx bundle depth %d pkts %d\n",
                   get_queue_depth(rxq), n_scat_pkt);

        scat_req = hif_scatter_req_get(target->dev->ar);

        if (scat_req == NULL)
                /* status is still 0: caller falls back to single reads */
                goto fail_rx_pkt;

        for (i = 0; i < n_scat_pkt; i++) {
                int pad_len;

                packet = list_first_entry(rxq, struct htc_packet, list);
                list_del(&packet->list);

                /* each scatter entry is padded to the I/O block size */
                pad_len = CALC_TXRX_PADDED_LEN(target,
                                               packet->act_len);

                if ((rem_space - pad_len) < 0) {
                        /* no room left in this bundle; put it back */
                        list_add(&packet->list, rxq);
                        break;
                }

                rem_space -= pad_len;

                if (part_bundle || (i < (n_scat_pkt - 1)))
                        /*
                         * Packet 0..n-1 cannot be checked for look-aheads
                         * since we are fetching a bundle the last packet
                         * however can have its lookahead used
                         */
                        packet->info.rx.rx_flags |=
                                HTC_RX_PKT_IGNORE_LOOKAHEAD;

                /* NOTE: 1 HTC packet per scatter entry */
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = pad_len;

                packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

                list_add_tail(&packet->list, sync_compq);

                WARN_ON(!scat_req->scat_list[i].len);
                len += scat_req->scat_list[i].len;
        }

        scat_req->len = len;
        scat_req->scat_entries = i;

        /* synchronous submit: blocks until the transfer completes */
        status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

        if (!status)
                *n_pkt_fetched = i;

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

        return status;
}
1917
/*
 * Post-process a queue of fetched RX packets: validate each packet's HTC
 * header, set the "more packets" indication flags and deliver each packet
 * to its endpoint.
 *
 * @lk_ahds/@n_lk_ahd are passed through to ath6kl_htc_rx_process_hdr(),
 * which presumably refreshes them from any lookahead trailer found in the
 * packets.  Returns the first header-processing error, leaving the
 * remaining packets on @comp_pktq for the caller to reclaim.
 */
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
                                         struct list_head *comp_pktq,
                                         u32 lk_ahds[],
                                         int *n_lk_ahd)
{
        struct htc_packet *packet, *tmp_pkt;
        struct htc_endpoint *ep;
        int status = 0;

        list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
                ep = &target->endpoint[packet->endpoint];

                /* process header for each of the recv packet */
                status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
                                                   n_lk_ahd);
                if (status)
                        return status;

                /* unlink before the list_empty() test below so the test
                 * means "this was the last packet in the queue" */
                list_del(&packet->list);

                if (list_empty(comp_pktq)) {
                        /*
                         * Last packet's more packet flag is set
                         * based on the lookahead.
                         */
                        if (*n_lk_ahd > 0)
                                ath6kl_htc_rx_set_indicate(lk_ahds[0],
                                                           ep, packet);
                } else
                        /*
                         * Packets in a bundle automatically have
                         * this flag set.
                         */
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;

                ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

                if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
                        ep->ep_st.rx_bundl += 1;

                ath6kl_htc_rx_complete(ep, packet);
        }

        return status;
}
1964
/*
 * Drain @rx_pktq by fetching every packet from the target, bundling when
 * RX bundling is enabled and more than one packet is queued, otherwise
 * reading one packet at a time synchronously.  Successfully fetched
 * packets are moved onto @comp_pktq.
 *
 * On failure all packets still on @rx_pktq (not yet fetched) and on the
 * local @tmp_rxq staging list are reclaimed back to their endpoints'
 * RX buffer pools before the error is returned.
 */
static int ath6kl_htc_rx_fetch(struct htc_target *target,
                               struct list_head *rx_pktq,
                               struct list_head *comp_pktq)
{
        int fetched_pkts;
        bool part_bundle = false;
        int status = 0;
        struct list_head tmp_rxq;
        struct htc_packet *packet, *tmp_pkt;

        /* now go fetch the list of HTC packets */
        while (!list_empty(rx_pktq)) {
                fetched_pkts = 0;

                /* staging list; re-initialized every iteration */
                INIT_LIST_HEAD(&tmp_rxq);

                if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
                        /*
                         * There are enough packets to attempt a
                         * bundle transfer and recv bundling is
                         * allowed.
                         */
                        status = ath6kl_htc_rx_bundle(target, rx_pktq,
                                                      &tmp_rxq,
                                                      &fetched_pkts,
                                                      part_bundle);
                        if (status)
                                goto fail_rx;

                        /* leftovers mean the bundle was split; their
                         * lookaheads must be ignored from now on */
                        if (!list_empty(rx_pktq))
                                part_bundle = true;

                        list_splice_tail_init(&tmp_rxq, comp_pktq);
                }

                /* fetched_pkts stays 0 when bundling was skipped or the
                 * HIF had no free scatter request: fall back to a single
                 * synchronous read */
                if (!fetched_pkts) {

                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                  list);

                        /* fully synchronous */
                        packet->completion = NULL;

                        if (!list_is_singular(rx_pktq))
                                /*
                                 * look_aheads in all packet
                                 * except the last one in the
                                 * bundle must be ignored
                                 */
                                packet->info.rx.rx_flags |=
                                        HTC_RX_PKT_IGNORE_LOOKAHEAD;

                        /* go fetch the packet */
                        status = ath6kl_htc_rx_packet(target, packet,
                                                      packet->act_len);

                        /* move to staging even on error so the cleanup
                         * path below reclaims it exactly once */
                        list_move_tail(&packet->list, &tmp_rxq);

                        if (status)
                                goto fail_rx;

                        list_splice_tail_init(&tmp_rxq, comp_pktq);
                }
        }

        return 0;

fail_rx:

        /*
         * Cleanup any packets we allocated but didn't use to
         * actually fetch any packets.
         */

        list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
                list_del(&packet->list);
                htc_reclaim_rxbuf(target, packet,
                                  &target->endpoint[packet->endpoint]);
        }

        list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
                list_del(&packet->list);
                htc_reclaim_rxbuf(target, packet,
                                  &target->endpoint[packet->endpoint]);
        }

        return status;
}
2053
Kalle Vaload226ec2011-08-10 09:49:12 +03002054int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
Vasanthakumar Thiagarajan4533d902011-10-03 17:26:27 +05302055 u32 msg_look_ahead, int *num_pkts)
Kalle Valobdcd8172011-07-18 00:22:30 +03002056{
2057 struct htc_packet *packets, *tmp_pkt;
2058 struct htc_endpoint *endpoint;
2059 struct list_head rx_pktq, comp_pktq;
2060 int status = 0;
2061 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2062 int num_look_ahead = 1;
2063 enum htc_endpoint_id id;
2064 int n_fetched = 0;
2065
Sujith Manoharancbec2672012-01-10 09:53:53 +05302066 INIT_LIST_HEAD(&comp_pktq);
Kalle Valobdcd8172011-07-18 00:22:30 +03002067 *num_pkts = 0;
2068
2069 /*
2070 * On first entry copy the look_aheads into our temp array for
2071 * processing
2072 */
Vasanthakumar Thiagarajan4533d902011-10-03 17:26:27 +05302073 look_aheads[0] = msg_look_ahead;
Kalle Valobdcd8172011-07-18 00:22:30 +03002074
2075 while (true) {
2076
2077 /*
2078 * First lookahead sets the expected endpoint IDs for all
2079 * packets in a bundle.
2080 */
2081 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2082 endpoint = &target->endpoint[id];
2083
2084 if (id >= ENDPOINT_MAX) {
2085 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2086 id);
2087 status = -ENOMEM;
2088 break;
2089 }
2090
2091 INIT_LIST_HEAD(&rx_pktq);
2092 INIT_LIST_HEAD(&comp_pktq);
2093
2094 /*
2095 * Try to allocate as many HTC RX packets indicated by the
2096 * look_aheads.
2097 */
Kalle Valo689def92011-09-06 11:10:49 +03002098 status = ath6kl_htc_rx_alloc(target, look_aheads,
2099 num_look_ahead, endpoint,
2100 &rx_pktq);
Kalle Valobdcd8172011-07-18 00:22:30 +03002101 if (status)
2102 break;
2103
2104 if (get_queue_depth(&rx_pktq) >= 2)
2105 /*
2106 * A recv bundle was detected, force IRQ status
2107 * re-check again
2108 */
Vasanthakumar Thiagarajanfcb82052011-07-18 14:23:31 +05302109 target->chk_irq_status_cnt = 1;
Kalle Valobdcd8172011-07-18 00:22:30 +03002110
2111 n_fetched += get_queue_depth(&rx_pktq);
2112
2113 num_look_ahead = 0;
2114
Kalle Valo689def92011-09-06 11:10:49 +03002115 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
Kalle Valobdcd8172011-07-18 00:22:30 +03002116
2117 if (!status)
Kalle Valo689def92011-09-06 11:10:49 +03002118 ath6kl_htc_rx_chk_water_mark(endpoint);
Kalle Valobdcd8172011-07-18 00:22:30 +03002119
2120 /* Process fetched packets */
Kalle Valo689def92011-09-06 11:10:49 +03002121 status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2122 look_aheads,
2123 &num_look_ahead);
Kalle Valobdcd8172011-07-18 00:22:30 +03002124
2125 if (!num_look_ahead || status)
2126 break;
2127
2128 /*
2129 * For SYNCH processing, if we get here, we are running
2130 * through the loop again due to a detected lookahead. Set
2131 * flag that we should re-check IRQ status registers again
2132 * before leaving IRQ processing, this can net better
2133 * performance in high throughput situations.
2134 */
Vasanthakumar Thiagarajanfcb82052011-07-18 14:23:31 +05302135 target->chk_irq_status_cnt = 1;
Kalle Valobdcd8172011-07-18 00:22:30 +03002136 }
2137
2138 if (status) {
2139 ath6kl_err("failed to get pending recv messages: %d\n",
2140 status);
Kalle Valobdcd8172011-07-18 00:22:30 +03002141
2142 /* cleanup any packets in sync completion queue */
2143 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2144 list_del(&packets->list);
2145 htc_reclaim_rxbuf(target, packets,
2146 &target->endpoint[packets->endpoint]);
2147 }
2148
2149 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2150 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
Kalle Valo8e8ddb22011-10-05 12:23:33 +03002151 ath6kl_hif_rx_control(target->dev, false);
Kalle Valobdcd8172011-07-18 00:22:30 +03002152 }
2153 }
2154
2155 /*
2156 * Before leaving, check to see if host ran out of buffers and
2157 * needs to stop the receiver.
2158 */
2159 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2160 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
Kalle Valo8e8ddb22011-10-05 12:23:33 +03002161 ath6kl_hif_rx_control(target->dev, false);
Kalle Valobdcd8172011-07-18 00:22:30 +03002162 }
2163 *num_pkts = n_fetched;
2164
2165 return status;
2166}
2167
2168/*
2169 * Synchronously wait for a control message from the target,
2170 * This function is used at initialization time ONLY. At init messages
2171 * on ENDPOINT 0 are expected.
2172 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_frame_hdr *htc_hdr;
        u32 look_ahead;

        /* poll (with timeout) for a lookahead announcing the message */
        if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
                                       HTC_TARGET_RESPONSE_TIMEOUT))
                return NULL;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

        /* the lookahead is the first 4 bytes of the frame header */
        htc_hdr = (struct htc_frame_hdr *)&look_ahead;

        /* control messages are only ever expected on endpoint 0 */
        if (htc_hdr->eid != ENDPOINT_0)
                return NULL;

        packet = htc_get_control_buf(target, false);

        if (!packet)
                return NULL;

        packet->info.rx.rx_flags = 0;
        packet->info.rx.exp_hdr = look_ahead;
        packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

        /* reject messages larger than the control buffer */
        if (packet->act_len > packet->buf_len)
                goto fail_ctrl_rx;

        /* we want synchronous operation */
        packet->completion = NULL;

        /* get the message from the device, this will block */
        if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
                goto fail_ctrl_rx;

        /* process receive header */
        packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

        if (packet->status) {
                ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
                           packet->status);
                goto fail_ctrl_rx;
        }

        return packet;

fail_ctrl_rx:
        /* on any failure the control buffer goes back to the free pool */
        if (packet != NULL) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return NULL;
}
2229
Kalle Vaload226ec2011-08-10 09:49:12 +03002230int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
2231 struct list_head *pkt_queue)
Kalle Valobdcd8172011-07-18 00:22:30 +03002232{
2233 struct htc_endpoint *endpoint;
2234 struct htc_packet *first_pkt;
2235 bool rx_unblock = false;
2236 int status = 0, depth;
2237
2238 if (list_empty(pkt_queue))
2239 return -ENOMEM;
2240
2241 first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
2242
2243 if (first_pkt->endpoint >= ENDPOINT_MAX)
2244 return status;
2245
2246 depth = get_queue_depth(pkt_queue);
2247
Kalle Valoebf29c92011-10-13 15:21:15 +03002248 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +03002249 "htc rx add multiple ep id %d cnt %d len %d\n",
Kalle Valobdcd8172011-07-18 00:22:30 +03002250 first_pkt->endpoint, depth, first_pkt->buf_len);
2251
2252 endpoint = &target->endpoint[first_pkt->endpoint];
2253
2254 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2255 struct htc_packet *packet, *tmp_pkt;
2256
2257 /* walk through queue and mark each one canceled */
2258 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
2259 packet->status = -ECANCELED;
2260 list_del(&packet->list);
Kalle Valo689def92011-09-06 11:10:49 +03002261 ath6kl_htc_rx_complete(endpoint, packet);
Kalle Valobdcd8172011-07-18 00:22:30 +03002262 }
2263
2264 return status;
2265 }
2266
2267 spin_lock_bh(&target->rx_lock);
2268
2269 list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
2270
2271 /* check if we are blocked waiting for a new buffer */
2272 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2273 if (target->ep_waiting == first_pkt->endpoint) {
Kalle Valoebf29c92011-10-13 15:21:15 +03002274 ath6kl_dbg(ATH6KL_DBG_HTC,
Kalle Valo471e92f2011-10-13 15:21:37 +03002275 "htc rx blocked on ep %d, unblocking\n",
Kalle Valobdcd8172011-07-18 00:22:30 +03002276 target->ep_waiting);
2277 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
2278 target->ep_waiting = ENDPOINT_MAX;
2279 rx_unblock = true;
2280 }
2281 }
2282
2283 spin_unlock_bh(&target->rx_lock);
2284
2285 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
2286 /* TODO : implement a buffer threshold count? */
Kalle Valo8e8ddb22011-10-05 12:23:33 +03002287 ath6kl_hif_rx_control(target->dev, true);
Kalle Valobdcd8172011-07-18 00:22:30 +03002288
2289 return status;
2290}
2291
/*
 * Free every queued RX buffer on every in-use endpoint.  Used during
 * shutdown after interrupts have been masked.
 */
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet, *tmp_pkt;
        int i;

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (!endpoint->svc_id)
                        /* not in use.. */
                        continue;

                /*
                 * rx_lock is dropped around the (possibly sleeping-
                 * unfriendly) free and debug print, then retaken.
                 * NOTE(review): this assumes nothing is concurrently
                 * adding to rx_bufq while we flush — confirm callers.
                 */
                spin_lock_bh(&target->rx_lock);
                list_for_each_entry_safe(packet, tmp_pkt,
                                         &endpoint->rx_bufq, list) {
                        list_del(&packet->list);
                        spin_unlock_bh(&target->rx_lock);
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx flush pkt 0x%p len %d ep %d\n",
                                   packet, packet->buf_len,
                                   packet->endpoint);
                        /* pkt_cntxt holds the skb backing this buffer */
                        dev_kfree_skb(packet->pkt_cntxt);
                        spin_lock_bh(&target->rx_lock);
                }
                spin_unlock_bh(&target->rx_lock);
        }
}
2319
/*
 * Connect a service to an HTC endpoint.
 *
 * For HTC_CTRL_RSVD_SVC the connection is handled locally (endpoint 0);
 * for every other service a connect message is sent synchronously to the
 * target and the response assigns the endpoint and max message size.
 * On success the endpoint is marked in use and its credit-distribution
 * parameters are derived from the credit size.
 *
 * NOTE(review): -ENOMEM is returned for several non-memory failures
 * (bad response, service refusal, invalid endpoint) — callers only test
 * for non-zero, but a more specific errno would be clearer.
 */
int ath6kl_htc_conn_service(struct htc_target *target,
                            struct htc_service_connect_req *conn_req,
                            struct htc_service_connect_resp *conn_resp)
{
        struct htc_packet *rx_pkt = NULL;
        struct htc_packet *tx_pkt = NULL;
        struct htc_conn_service_resp *resp_msg;
        struct htc_conn_service_msg *conn_msg;
        struct htc_endpoint *endpoint;
        enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
        unsigned int max_msg_sz = 0;
        int status = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc connect service target 0x%p service id 0x%x\n",
                   target, conn_req->svc_id);

        if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
                /* special case for pseudo control service */
                assigned_ep = ENDPOINT_0;
                max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
        } else {
                /* allocate a packet to send to the target */
                tx_pkt = htc_get_control_buf(target, true);

                if (!tx_pkt)
                        return -ENOMEM;

                conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
                memset(conn_msg, 0, sizeof(*conn_msg));
                conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
                conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
                conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

                /* svc_meta_len is zero here (struct was just memset) */
                set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
                                 sizeof(*conn_msg) + conn_msg->svc_meta_len,
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

                /* we want synchronous operation */
                tx_pkt->completion = NULL;
                ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
                status = ath6kl_htc_tx_issue(target, tx_pkt);

                if (status)
                        goto fail_tx;

                /* wait for response */
                rx_pkt = htc_wait_for_ctrl_msg(target);

                if (!rx_pkt) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

                /* validate response id and minimum length */
                if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
                    || (rx_pkt->act_len < sizeof(*resp_msg))) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                conn_resp->resp_code = resp_msg->status;
                /* check response status */
                if (resp_msg->status != HTC_SERVICE_SUCCESS) {
                        ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
                                   resp_msg->svc_id, resp_msg->status);
                        status = -ENOMEM;
                        goto fail_tx;
                }

                assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
                max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
        }

        /* sanity-check the target-assigned endpoint and message size */
        if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
                status = -ENOMEM;
                goto fail_tx;
        }

        endpoint = &target->endpoint[assigned_ep];
        endpoint->eid = assigned_ep;
        /* a non-zero svc_id means the endpoint is already connected */
        if (endpoint->svc_id) {
                status = -ENOMEM;
                goto fail_tx;
        }

        /* return assigned endpoint to caller */
        conn_resp->endpoint = assigned_ep;
        conn_resp->len_max = max_msg_sz;

        /* setup the endpoint */

        /* this marks the endpoint in use */
        endpoint->svc_id = conn_req->svc_id;

        endpoint->max_txq_depth = conn_req->max_txq_depth;
        endpoint->len_max = max_msg_sz;
        endpoint->ep_cb = conn_req->ep_cb;
        endpoint->cred_dist.svc_id = conn_req->svc_id;
        endpoint->cred_dist.htc_ep = endpoint;
        endpoint->cred_dist.endpoint = assigned_ep;
        endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

        if (conn_req->max_rxmsg_sz) {
                /*
                 * Override cred_per_msg calculation, this optimizes
                 * the credit-low indications since the host will actually
                 * issue smaller messages in the Send path.
                 */
                if (conn_req->max_rxmsg_sz > max_msg_sz) {
                        status = -ENOMEM;
                        goto fail_tx;
                }
                endpoint->cred_dist.cred_per_msg =
                        conn_req->max_rxmsg_sz / target->tgt_cred_sz;
        } else
                endpoint->cred_dist.cred_per_msg =
                        max_msg_sz / target->tgt_cred_sz;

        if (!endpoint->cred_dist.cred_per_msg)
                endpoint->cred_dist.cred_per_msg = 1;

        /* save local connection flags */
        endpoint->conn_flags = conn_req->flags;

fail_tx:
        if (tx_pkt)
                htc_reclaim_txctrl_buf(target, tx_pkt);

        if (rx_pkt) {
                htc_rxpkt_reset(rx_pkt);
                reclaim_rx_ctrl_buf(target, rx_pkt);
        }

        return status;
}
2457
2458static void reset_ep_state(struct htc_target *target)
2459{
2460 struct htc_endpoint *endpoint;
2461 int i;
2462
2463 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2464 endpoint = &target->endpoint[i];
2465 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2466 endpoint->svc_id = 0;
2467 endpoint->len_max = 0;
2468 endpoint->max_txq_depth = 0;
2469 memset(&endpoint->ep_st, 0,
2470 sizeof(endpoint->ep_st));
2471 INIT_LIST_HEAD(&endpoint->rx_bufq);
2472 INIT_LIST_HEAD(&endpoint->txq);
2473 endpoint->target = target;
2474 }
2475
2476 /* reset distribution list */
Kalle Valo3c370392011-10-24 12:17:12 +03002477 /* FIXME: free existing entries */
Kalle Valobdcd8172011-07-18 00:22:30 +03002478 INIT_LIST_HEAD(&target->cred_dist_list);
2479}
2480
Kalle Vaload226ec2011-08-10 09:49:12 +03002481int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
2482 enum htc_endpoint_id endpoint)
Kalle Valobdcd8172011-07-18 00:22:30 +03002483{
2484 int num;
2485
2486 spin_lock_bh(&target->rx_lock);
2487 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2488 spin_unlock_bh(&target->rx_lock);
2489 return num;
2490}
2491
/*
 * Negotiate message bundling parameters between what HTC, the HIF
 * scatter layer and the target's credit configuration allow.  Sets
 * msg_per_bndl_max, max_rx/tx_bndl_sz and the rx/tx bundle-enable flags.
 */
static void htc_setup_msg_bndl(struct htc_target *target)
{
        /* limit what HTC can handle */
        target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
                                       target->msg_per_bndl_max);

        /* without HIF scatter support no bundling is possible at all */
        if (ath6kl_hif_enable_scatter(target->dev->ar)) {
                target->msg_per_bndl_max = 0;
                return;
        }

        /* limit bundle what the device layer can handle */
        target->msg_per_bndl_max = min(target->max_scat_entries,
                                       target->msg_per_bndl_max);

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "htc bundling allowed msg_per_bndl_max %d\n",
                   target->msg_per_bndl_max);

        /* Max rx bundle size is limited by the max tx bundle size */
        target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
        /* Max tx bundle size if limited by the extended mbox address range */
        target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
                                     target->max_xfer_szper_scatreq);

        ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
                   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

        if (target->max_tx_bndl_sz)
                target->tx_bndl_enable = true;

        if (target->max_rx_bndl_sz)
                target->rx_bndl_enable = true;

        if ((target->tgt_cred_sz % target->block_sz) != 0) {
                ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
                            target->tgt_cred_sz);

                /*
                 * Disallow send bundling since the credit size is
                 * not aligned to a block size the I/O block
                 * padding will spill into the next credit buffer
                 * which is fatal.
                 */
                target->tx_bndl_enable = false;
        }
}
2539
/*
 * Wait for the target's HTC "ready" control message, record the credit
 * configuration and HTC protocol version it reports, set up message
 * bundling if supported, and connect the pseudo control service on
 * endpoint 0.  Called once during initialization.
 */
int ath6kl_htc_wait_target(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_ready_ext_msg *rdy_msg;
        struct htc_service_connect_req connect;
        struct htc_service_connect_resp resp;
        int status;

        /* FIXME: remove once USB support is implemented */
        if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
                ath6kl_err("HTC doesn't support USB yet. Patience!\n");
                return -EOPNOTSUPP;
        }

        /* we should be getting 1 control message that the target is ready */
        packet = htc_wait_for_ctrl_msg(target);

        if (!packet)
                return -ENOMEM;

        /* we controlled the buffer creation so it's properly aligned */
        rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

        /* the message must at least be a legacy (non-extended) ready msg */
        if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
            (packet->act_len < sizeof(struct htc_ready_msg))) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
        target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "htc target ready credits %d size %d\n",
                   target->tgt_creds, target->tgt_cred_sz);

        /* check if this is an extended ready message */
        if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
                /* this is an extended message */
                target->htc_tgt_ver = rdy_msg->htc_ver;
                target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
        } else {
                /* legacy */
                target->htc_tgt_ver = HTC_VERSION_2P0;
                target->msg_per_bndl_max = 0;
        }

        ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
                   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
                   target->htc_tgt_ver);

        if (target->msg_per_bndl_max > 0)
                htc_setup_msg_bndl(target);

        /* setup our pseudo HTC control endpoint connection */
        memset(&connect, 0, sizeof(connect));
        memset(&resp, 0, sizeof(resp));
        connect.ep_cb.rx = htc_ctrl_rx;
        connect.ep_cb.rx_refill = NULL;
        connect.ep_cb.tx_full = NULL;
        connect.max_txq_depth = NUM_CONTROL_BUFFERS;
        connect.svc_id = HTC_CTRL_RSVD_SVC;

        /* connect fake service */
        status = ath6kl_htc_conn_service((void *)target, &connect, &resp);

        if (status)
                /*
                 * FIXME: this call doesn't make sense, the caller should
                 * call ath6kl_htc_cleanup() when it wants remove htc
                 */
                ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
        if (packet) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return status;
}
2626
/*
 * Start HTC, enable interrupts and let the target know
 * host has finished setup.
 *
 * On any failure the error is returned to the caller; if unmasking
 * interrupts fails, HTC is stopped again before returning.
 */
int ath6kl_htc_start(struct htc_target *target)
{
        struct htc_packet *packet;
        int status;

        /* clear any stale cached interrupt-processing registers */
        memset(&target->dev->irq_proc_reg, 0,
               sizeof(target->dev->irq_proc_reg));

        /* Disable interrupts at the chip level */
        ath6kl_hif_disable_intrs(target->dev);

        target->htc_flags = 0;
        target->rx_st_flags = 0;

        /* Push control receive buffers into htc control endpoint */
        while ((packet = htc_get_control_buf(target, false)) != NULL) {
                status = htc_add_rxbuf(target, packet);
                if (status)
                        return status;
        }

        /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
        ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
                           target->tgt_creds);

        dump_cred_dist_stats(target);

        /* Indicate to the target of the setup completion */
        status = htc_setup_tx_complete(target);

        if (status)
                return status;

        /* unmask interrupts */
        status = ath6kl_hif_unmask_intrs(target->dev);

        if (status)
                ath6kl_htc_stop(target);

        return status;
}
2672
Kalle Valo8a8109162011-10-27 18:49:00 +03002673static int ath6kl_htc_reset(struct htc_target *target)
2674{
2675 u32 block_size, ctrl_bufsz;
2676 struct htc_packet *packet;
2677 int i;
2678
2679 reset_ep_state(target);
2680
2681 block_size = target->dev->ar->mbox_info.block_size;
2682
2683 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2684 (block_size + HTC_HDR_LENGTH) :
2685 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2686
2687 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2688 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2689 if (!packet)
2690 return -ENOMEM;
2691
2692 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2693 if (!packet->buf_start) {
2694 kfree(packet);
2695 return -ENOMEM;
2696 }
2697
2698 packet->buf_len = ctrl_bufsz;
2699 if (i < NUM_CONTROL_RX_BUFFERS) {
2700 packet->act_len = 0;
2701 packet->buf = packet->buf_start;
2702 packet->endpoint = ENDPOINT_0;
2703 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2704 } else
2705 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2706 }
2707
2708 return 0;
2709}
2710
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
        /*
         * Mark HTC as stopping first; paths such as
         * ath6kl_htc_add_rxbuf_multiple() check this flag and cancel
         * new buffers instead of queueing them.
         */
        spin_lock_bh(&target->htc_lock);
        target->htc_flags |= HTC_OP_STATE_STOPPING;
        spin_unlock_bh(&target->htc_lock);

        /*
         * Masking interrupts is a synchronous operation, when this
         * function returns all pending HIF I/O has completed, we can
         * safely flush the queues.
         */
        ath6kl_hif_mask_intrs(target->dev);

        ath6kl_htc_flush_txep_all(target);

        ath6kl_htc_flush_rx_buf(target);

        /* NOTE(review): ath6kl_htc_reset() can fail with -ENOMEM but the
         * return value is ignored here — confirm this is acceptable. */
        ath6kl_htc_reset(target);
}
2731
Kalle Vaload226ec2011-08-10 09:49:12 +03002732void *ath6kl_htc_create(struct ath6kl *ar)
Kalle Valobdcd8172011-07-18 00:22:30 +03002733{
2734 struct htc_target *target = NULL;
Kalle Valo8a8109162011-10-27 18:49:00 +03002735 int status = 0;
Kalle Valobdcd8172011-07-18 00:22:30 +03002736
2737 target = kzalloc(sizeof(*target), GFP_KERNEL);
2738 if (!target) {
2739 ath6kl_err("unable to allocate memory\n");
2740 return NULL;
2741 }
2742
2743 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2744 if (!target->dev) {
2745 ath6kl_err("unable to allocate memory\n");
2746 status = -ENOMEM;
Kalle Valo8a8109162011-10-27 18:49:00 +03002747 goto err_htc_cleanup;
Kalle Valobdcd8172011-07-18 00:22:30 +03002748 }
2749
2750 spin_lock_init(&target->htc_lock);
2751 spin_lock_init(&target->rx_lock);
2752 spin_lock_init(&target->tx_lock);
2753
2754 INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2755 INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2756 INIT_LIST_HEAD(&target->cred_dist_list);
2757
2758 target->dev->ar = ar;
2759 target->dev->htc_cnxt = target;
Kalle Valobdcd8172011-07-18 00:22:30 +03002760 target->ep_waiting = ENDPOINT_MAX;
2761
Kalle Valo8e8ddb22011-10-05 12:23:33 +03002762 status = ath6kl_hif_setup(target->dev);
Kalle Valobdcd8172011-07-18 00:22:30 +03002763 if (status)
Kalle Valo8a8109162011-10-27 18:49:00 +03002764 goto err_htc_cleanup;
Kalle Valobdcd8172011-07-18 00:22:30 +03002765
Kalle Valo8a8109162011-10-27 18:49:00 +03002766 status = ath6kl_htc_reset(target);
2767 if (status)
2768 goto err_htc_cleanup;
Kalle Valobdcd8172011-07-18 00:22:30 +03002769
2770 return target;
Kalle Valo8a8109162011-10-27 18:49:00 +03002771
2772err_htc_cleanup:
2773 ath6kl_htc_cleanup(target);
2774
2775 return NULL;
Kalle Valobdcd8172011-07-18 00:22:30 +03002776}
2777
2778/* cleanup the HTC instance */
Kalle Vaload226ec2011-08-10 09:49:12 +03002779void ath6kl_htc_cleanup(struct htc_target *target)
Kalle Valobdcd8172011-07-18 00:22:30 +03002780{
2781 struct htc_packet *packet, *tmp_packet;
2782
Kalle Valo241b1282012-01-17 20:09:45 +02002783 /* FIXME: remove check once USB support is implemented */
2784 if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
2785 ath6kl_hif_cleanup_scatter(target->dev->ar);
Kalle Valobdcd8172011-07-18 00:22:30 +03002786
2787 list_for_each_entry_safe(packet, tmp_packet,
2788 &target->free_ctrl_txbuf, list) {
2789 list_del(&packet->list);
2790 kfree(packet->buf_start);
2791 kfree(packet);
2792 }
2793
2794 list_for_each_entry_safe(packet, tmp_packet,
2795 &target->free_ctrl_rxbuf, list) {
2796 list_del(&packet->list);
2797 kfree(packet->buf_start);
2798 kfree(packet);
2799 }
2800
2801 kfree(target->dev);
2802 kfree(target);
2803}