/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

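/*
 * Note on the macro below: block_mask is assumed (as elsewhere in this
 * driver) to be the target's I/O block size minus one, so __ALIGN_MASK()
 * rounds a message length up to the next block boundary, e.g. a 60-byte
 * message pads out to 128 with a 128-byte block size.
 */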
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))

/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
				  struct htc_endpoint_credit_dist *ep_dist,
				  int credits)
{
	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
		   ep_dist->endpoint, credits);

	ep_dist->credits += credits;
	ep_dist->cred_assngd += credits;
	cred_info->cur_free_credits -= credits;
}

static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
			/* this is the lowest priority data endpoint */
			/* FIXME: this looks fishy, check */
			cred_info->lowestpri_ep_dist = cur_ep_dist->list;

		/*
		 * Streams have to be created (explicit | implicit) for all
		 * kinds of traffic. BE endpoints are also inactive in the
		 * beginning. When BE traffic starts it creates implicit
		 * streams that redistribute credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. Credits will be distributed
		 * as traffic activity demands.
		 */
	}

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		else {
			/*
			 * For the remaining data endpoints, we assume that
			 * cred_per_msg is the same for each. We use a simple
			 * calculation here: take the remaining credits,
			 * round down to a whole number of messages, and set
			 * each endpoint's normal value to 3/4 of that
			 * amount.
			 */
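			/*
			 * Hypothetical example: with 26 free credits and
			 * cred_per_msg = 6, count rounds down to 24, then
			 * (24 * 3) >> 2 = 18, and the max() below keeps at
			 * least one message's worth, so cred_norm = 18.
			 */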
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;
		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}

/* initialize and setup credit distribution */
int ath6kl_credit_setup(void *htc_handle,
			struct ath6kl_htc_credit_info *cred_info)
{
	u16 servicepriority[5];

	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
	servicepriority[1] = WMI_DATA_VO_SVC;
	servicepriority[2] = WMI_DATA_VI_SVC;
	servicepriority[3] = WMI_DATA_BE_SVC;
	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

	/* set priority list */
	ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);

	return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
				 struct htc_endpoint_credit_dist *ep_dist,
				 int limit)
{
	int credits;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
		   ep_dist->endpoint, limit);

	ep_dist->cred_assngd = limit;

	if (ep_dist->credits <= limit)
		return;

	credits = ep_dist->credits - limit;
	ep_dist->credits -= credits;
	cred_info->cur_free_credits += credits;
}

static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
				 struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_dist_list;

	list_for_each_entry(cur_dist_list, epdist_list, list) {
		if (cur_dist_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_dist_list->cred_to_dist > 0) {
			cur_dist_list->credits +=
				cur_dist_list->cred_to_dist;
			cur_dist_list->cred_to_dist = 0;
			if (cur_dist_list->credits >
			    cur_dist_list->cred_assngd)
				ath6kl_credit_reduce(cred_info,
						     cur_dist_list,
						     cur_dist_list->cred_assngd);

			if (cur_dist_list->credits >
			    cur_dist_list->cred_norm)
				ath6kl_credit_reduce(cred_info, cur_dist_list,
						     cur_dist_list->cred_norm);

			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_dist_list->txq_depth == 0)
					ath6kl_credit_reduce(cred_info,
							     cur_dist_list, 0);
			}
		}
	}
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
			       struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 * 1. Only take from lower priority endpoints
	 * 2. Only take what is allocated above the minimum (never
	 *    starve an endpoint completely)
	 * 3. Only take what you need.
	 */

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		    curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and it has enough credits assigned
			 * above its minimum to fulfill our need; try to
			 * take away just enough to fulfill our need.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6kl_credit_reduce(info, curdist_list, 0);
			else
				ath6kl_credit_reduce(info,
						     curdist_list,
						     curdist_list->cred_min);
		}
	}
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked;
 * this function shall NOT block. The ep_dist_list is a list of
 * distribution structures in prioritized order as defined by the
 * call to the htc_set_credit_dist() api.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
				     struct list_head *ep_dist_list,
				     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6kl_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6kl_credit_redistribute(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}

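/*
 * Shift a buffer down to the nearest preceding 4-byte boundary. This
 * assumes the caller has left at least three bytes of headroom in front
 * of *buf for the memmove below.
 */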
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

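/*
 * Build the HTC frame header in front of the payload; packet->buf must
 * have HTC_HDR_LENGTH bytes of headroom, since the buffer pointer is
 * walked back before the header fields are filled in.
 */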
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx complete ep %d pkts %d\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);
	/* do completion */
	htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx scat complete len %d entries %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}

static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx issue len %d padded_len %d mbox 0x%X %s\n",
		   send_len, padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}

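/*
 * Reserve the credits needed to send a message of the given padded
 * length, asking the credit distribution code to seek more when the
 * endpoint runs short.
 */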
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}

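/*
 * Move packets off the endpoint TX queue onto the caller's queue for as
 * long as credits can be reserved; each packet leaves with its credit
 * cost, send flags and sequence number recorded.
 */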
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {

		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx got packet 0x%p queue depth %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}

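/*
 * Fill a HIF scatter request from the head of the caller's queue: each
 * packet gets its HTC header (flagged as part of a send bundle), credit
 * padding and 4-byte alignment before being added to the scatter list.
 * Everything is rolled back if fewer than the minimum bundleable
 * messages fit.
 */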
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		ath6kl_htc_tx_prep_pkt(packet,
				packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
				cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p len %d remaining %d\n",
			   i, packet, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}

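/*
 * Drain the endpoint TX queue. tx_proc_cnt serializes the drain: only
 * the first caller proceeds, and a nested call (e.g. from a completion
 * path) simply returns and lets the owner continue.
 */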
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_enable) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				ath6kl_htc_tx_bundle(endpoint, &txq,
						     &temp1, &temp2);
				bundle_sent += temp1;
				n_pkts_bundle += temp2;
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			ath6kl_htc_tx_issue(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

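/*
 * Send the HTC setup-complete control message. Targets speaking HTC
 * version 2.1 or later get the extended variant, which can also
 * advertise how many messages the host accepts per RX bundle.
 */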
static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
						target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

void ath6kl_htc_set_credit_dist(struct htc_target *target,
				struct ath6kl_htc_credit_info *credit_info,
				u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->credit_info = credit_info;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx ep id %d buf 0x%p len %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
void ath6kl_htc_flush_txep(struct htc_target *target,
			   enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}
}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

void ath6kl_htc_indicate_activity_change(struct htc_target *target,
					 enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

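/*
 * Pull a single message off the mailbox into packet->buf with a
 * synchronous HIF read, padding the read length up to the target's
 * block size.
 */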
static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr);

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}

static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

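/*
 * Process a credit report from the target: per-endpoint statistics are
 * updated, endpoint 0 gets its credits back directly, and any other
 * returned credits trigger a SEND_COMPLETE redistribution.
 */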
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit report ep %d credits %d\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operation; note, this is done with the lock held.
		 */
		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

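/*
 * Dispatch one trailer record: a credit report, a single lookahead
 * report (validated through its pre_valid/post_valid bytes), or a
 * bundle lookahead report carrying one lookahead per bundled message.
 */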
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
					"htc rx next look ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
					"", record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;
}
1660
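/*
 * Walk all records in a frame trailer. The trailer is a sequence of
 * byte-aligned records, each prefixed by a struct htc_record_hdr
 * carrying its id and payload length; parsing stops at the first
 * truncated, oversized or otherwise failing record.
 */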
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}

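/*
 * Validate and strip the HTC header of a received packet. The first
 * four bytes of the buffer must match the lookahead the header was
 * predicted from; if the header flags a trailer, the trailer is parsed
 * off the end of the payload and act_len is reduced accordingly.
 */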
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -EINVAL;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -EINVAL;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -EINVAL;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -EINVAL;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}

static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

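/*
 * Fetch up to msg_per_bndl_max packets from rxq in a single
 * scatter/gather transfer. Each packet occupies one scatter entry,
 * padded to the block size; packets that no longer fit within
 * max_rx_bndl_sz are pushed back onto rxq for a later pass.
 */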
static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:
	return status;
}

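/*
 * Run header processing and completion for every fetched packet. Only
 * the final packet of the queue may carry a fresh lookahead; all
 * earlier ones are flagged as having more packets behind them.
 */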
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process the header of each recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * The last packet's more-packets flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

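/*
 * Drain rx_pktq into comp_pktq, bundling when the HIF layer supports it
 * and falling back to one blocking, fully synchronous read per packet
 * otherwise. On error, every unfetched or half-fetched packet is
 * reclaimed to its endpoint's rx_bufq.
 */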
static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      &tmp_rxq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				goto fail_rx;

			if (!list_empty(rx_pktq))
				part_bundle = true;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}

		if (!fetched_pkts) {
			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_is_singular(rx_pktq))
				/*
				 * Lookaheads in all packets
				 * except the last one in the
				 * bundle must be ignored.
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);

			list_move_tail(&packet->list, &tmp_rxq);

			if (status)
				goto fail_rx;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}
	}

	return 0;

fail_rx:

	/*
	 * Cleanup any packets we allocated but didn't use to
	 * actually fetch any packets.
	 */

	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	return status;
}

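/*
 * Top-level receive handler, driven from the HIF IRQ path whenever the
 * target reports a lookahead. It keeps allocating, fetching and
 * completing packets until a processed trailer no longer yields a new
 * lookahead. A hedged sketch of a call site (the real caller lives in
 * the HIF layer; the variable names here are illustrative only):
 *
 *	u32 lk_ahd = ...;	(first lookahead from the mbox registers)
 *	int fetched;
 *	int status;
 *
 *	status = ath6kl_htc_rxmsg_pending_handler(target, lk_ahd, &fetched);
 */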
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead, int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing.
	 */
	look_aheads[0] = msg_look_ahead;

	while (true) {
		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -EINVAL;
			break;
		}

		endpoint = &target->endpoint[id];

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets as indicated
		 * by the look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force an IRQ status
			 * re-check.
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * a flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing; this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kl_hif_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if the host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kl_hif_rx_control(target->dev, false);
	}

	*num_pkts = n_fetched;

	return status;
}

/*
 * Synchronously wait for a control message from the target. This
 * function is used at initialization time ONLY; at init, messages on
 * ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device; this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}

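/*
 * Hand a list of receive buffers to HTC in one call. All packets in
 * pkt_queue are assumed to belong to a single endpoint; only the first
 * entry's endpoint id is validated. A minimal usage sketch, assuming
 * the packet has already been initialized with set_htc_rxpkt_info() as
 * the rx refill path elsewhere in this driver does:
 *
 *	LIST_HEAD(queue);
 *
 *	list_add_tail(&packet->list, &queue);
 *	status = ath6kl_htc_add_rxbuf_multiple(target, &queue);
 */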
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -EINVAL;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return -EINVAL;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}

void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}

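/*
 * Connect a service (WMI control, data classes, ...) to an endpoint.
 * Except for the pseudo control service, this is a synchronous
 * exchange: a connect message is sent on ENDPOINT_0 and the response
 * assigns the endpoint id and maximum message size, which in turn seed
 * the endpoint's credit distribution settings.
 */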
int ath6kl_htc_conn_service(struct htc_target *target,
			    struct htc_service_connect_req *conn_req,
			    struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc connect service target 0x%p service id 0x%x\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID) ||
		    (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_ep = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override the cred_per_msg calculation; this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}

static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0,
		       sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	/* FIXME: free existing entries */
	INIT_LIST_HEAD(&target->cred_dist_list);
}

int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);
	return num;
}

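/*
 * Size the tx/rx message bundles from what HTC, the HIF scatter
 * implementation and the extended mailbox width can each handle; the
 * smallest limit wins. Bundling is disabled entirely when scatter
 * requests are unsupported, and send bundling is additionally refused
 * when the credit size is not block aligned.
 */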
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle size to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc bundling allowed msg_per_bndl_max %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is
		 * not aligned to a block size; the I/O block
		 * padding would spill into the next credit buffer,
		 * which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}

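/*
 * Wait for the target's HTC ready message, record its credit
 * configuration and protocol version, and connect the pseudo control
 * service on ENDPOINT_0.
 */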
int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should get one control message saying the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation, so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc target ready credits %d size %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect the fake service */
	status = ath6kl_htc_conn_service(target, &connect, &resp);

	if (status)
		/*
		 * FIXME: this call doesn't make sense, the caller should
		 * call ath6kl_htc_cleanup() when it wants to remove htc.
		 */
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}

/*
 * Start HTC, enable interrupts and let the target know the
 * host has finished setup.
 */
int ath6kl_htc_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	/* Disable interrupts at the chip level */
	ath6kl_hif_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
			   target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate setup completion to the target */
	status = htc_setup_tx_complete(target);

	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kl_hif_unmask_intrs(target->dev);

	if (status)
		ath6kl_htc_stop(target);

	return status;
}

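/*
 * Re-initialize endpoint state and (re)fill the shared pool of control
 * buffers. Each buffer is large enough for the HTC header plus either a
 * block-sized or a maximum-length control message, whichever is bigger;
 * the first NUM_CONTROL_RX_BUFFERS go to the rx free list and the rest
 * to the tx free list.
 */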
static int ath6kl_htc_reset(struct htc_target *target)
{
	u32 block_size, ctrl_bufsz;
	struct htc_packet *packet;
	int i;

	reset_ep_state(target);

	block_size = target->dev->ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			return -ENOMEM;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			return -ENOMEM;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
	}

	return 0;
}

/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns all pending HIF I/O has completed and we can
	 * safely flush the queues.
	 */
	ath6kl_hif_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_flush_rx_buf(target);

	ath6kl_htc_reset(target);
}

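/*
 * Allocate and set up an HTC instance for the given ath6kl device. A
 * hedged sketch of the expected lifecycle, based only on the calls in
 * this file (the real sequencing lives in the core/boot code):
 *
 *	target = ath6kl_htc_create(ar);
 *	if (!target)
 *		goto err;
 *	status = ath6kl_htc_wait_target(target);
 *	...	(connect services, then ath6kl_htc_start())
 *	ath6kl_htc_stop(target);
 *	ath6kl_htc_cleanup(target);
 */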
void *ath6kl_htc_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	int status = 0;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto err_htc_cleanup;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	status = ath6kl_hif_setup(target->dev);
	if (status)
		goto err_htc_cleanup;

	status = ath6kl_htc_reset(target);
	if (status)
		goto err_htc_cleanup;

	return target;

err_htc_cleanup:
	ath6kl_htc_cleanup(target);

	return NULL;
}

/* cleanup the HTC instance */
void ath6kl_htc_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	/* target->dev is NULL if ath6kl_htc_create() failed early */
	if (target->dev)
		ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}