/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc_hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

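/*
 * Pad a transfer length up to the interconnect block size using the
 * precomputed block mask (block size minus one). For example, with a
 * 128-byte SDIO block size a 90-byte frame pads out to 128 bytes.
 */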
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))

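/*
 * Shift the payload down to the nearest preceding 4-byte boundary. This
 * relies on the caller having reserved sufficient headroom below *buf to
 * absorb the up-to-3-byte downward move.
 */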
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

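/*
 * Build the HTC frame header in the headroom immediately preceding the
 * payload; packet->buf is rewound so that it points at the header.
 */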
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

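/*
 * Tx completion bookkeeping: restore packet->buf past the HTC header and,
 * if the send failed, return the credits the packet consumed to the
 * distribution pool so other endpoints can use them.
 */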
static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
				packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
		   target->cred_dist_cntxt, &target->cred_dist_list);

	ath6k_credit_distribute(target->cred_dist_cntxt,
				&target->cred_dist_list,
				HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "send complete ep %d, (%d pkts)\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);
	/* do completion */
	htc_tx_complete(endpoint, &container);
}

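/*
 * Completion handler for an asynchronous bundled (scatter) send: every
 * packet in the scatter list takes the request status, the scatter
 * request is returned to the HIF layer, and the packets are then
 * completed as one batch.
 */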
static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_async_tx_scat_complete total len: %d entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}

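/*
 * Write one HTC packet to the target's mailbox: synchronously when the
 * packet has no completion callback, asynchronously otherwise. The write
 * length is padded up to the block size in both cases.
 */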
static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
		   __func__, send_len, sync ? "sync" : "async");

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
		   padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}

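/*
 * Credit accounting for one pending packet. A message costs one credit
 * per tgt_cred_sz bytes, rounded up (e.g. 1600 bytes with 1536-byte
 * credits costs two). If the endpoint is short, credits are sought from
 * the distribution function; if the pool is still low after consuming,
 * the packet is flagged so the target returns credits ASAP.
 */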
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{

	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &ep->cred_dist);

		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "not enough credits for ep %d - leaving packet in queue\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &ep->cred_dist);

		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
		}
	}

	return 0;
}

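/*
 * Move packets from the endpoint TX queue onto the caller's queue for as
 * long as credits allow (called with tx_lock held). Each packet is
 * stamped with its credit cost, send flags and sequence number, and is
 * completed asynchronously via htc_tx_comp_handler.
 */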
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {

		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "got head pkt:0x%p , queue depth: %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}

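/*
 * Build a HIF scatter request from the head of the queue: each packet is
 * HTC-prepped, 4-byte aligned and added as one scatter entry until the
 * bundle buffer is full or a packet needs too much credit padding. If
 * fewer than HTC_MIN_HTC_MSGS_TO_BUNDLE entries fit, the setup is rolled
 * back and -EAGAIN tells the caller to fall back to non-bundled sends.
 */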
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		packet = list_first_entry(queue, struct htc_packet, list);
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		ath6kl_htc_tx_prep_pkt(packet,
				packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
				cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
			   i, packet, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "no more scatter resources\n");
			break;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "send scatter total bytes: %d , entries: %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kldev_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
		   __func__, n_sent_bundle);

	return;
}

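/*
 * Drain the endpoint TX queue. tx_proc_cnt serializes this per endpoint:
 * a concurrent caller simply backs off, leaving the first caller to keep
 * draining, bundling packets where possible and issuing the rest
 * individually.
 */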
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_enable) &&
			    (get_queue_depth(&txq) >=
			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				ath6kl_htc_tx_bundle(endpoint, &txq,
						     &temp1, &temp2);
				bundle_sent += temp1;
				n_pkts_bundle += temp2;
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			ath6kl_htc_tx_issue(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
			   endpoint->eid, overflow, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "indicating overflowed tx packet: 0x%p\n", tx_pkt);

		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "ep %d has %d credits and %d packets in tx queue\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

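/*
 * Send the HTC "setup complete" control message on endpoint 0. Targets
 * speaking HTC 2.1 or later get the extended variant, which can also
 * advertise host-side RX bundling.
 */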
static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
						target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

void ath6kl_htc_set_credit_dist(struct htc_target *target,
				struct htc_credit_state_info *cred_dist_cntxt,
				u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->cred_dist_cntxt = cred_dist_cntxt;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
void ath6kl_htc_flush_txep(struct htc_target *target,
			   enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}

}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

void ath6kl_htc_indicate_activity_change(struct htc_target *target,
					 enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
				       int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

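/*
 * Blocking read of one packet from the target's mailbox. The receive
 * length is padded up to the block size and must fit within the
 * preallocated buffer.
 */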
static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
		      u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr, "sync");

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}

/*
 * Optimization for recv packets: we can indicate a "hint" that there
 * are more single packets to fetch on this endpoint.
 */
static void set_rxpkt_indication_flag(u32 lk_ahd,
				      struct htc_endpoint *endpoint,
				      struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void chk_rx_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
			    u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag; any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
				  HTC_HDR_LENGTH;
	}

	return status;
}

static int alloc_and_prep_rxpkts(struct htc_target *target,
				 u32 lk_ahds[], int msg,
				 struct htc_endpoint *endpoint,
				 struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
					  n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
				"Unexpected ENDPOINT 0 Message",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "report indicated %d credits to distribute\n",
		   tot_credits);

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operation; note, this is done with the lock held.
		 */
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

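/*
 * Dispatch a single trailer record: credit reports feed the credit
 * distributor, while lookahead records (single or bundled) seed the
 * next receive pass.
 */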
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
					next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
					record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;

}

static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
				orig_buf, orig_len);

	return status;
}

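/*
 * Validate a received HTC header against the expected lookahead, process
 * any trailer records, then strip header and trailer so that packet->buf
 * and packet->act_len describe only the application payload.
 */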
static int htc_proc_rxhdr(struct htc_target *target,
			  struct htc_packet *packet,
			  u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
			packet->act_len);

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
				&packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
				(u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
				packet->buf,
				packet->act_len < 256 ? packet->act_len : 256);
	else {
		if (packet->act_len > 0)
			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
					"HTC - Application Msg",
					packet->buf, packet->act_len);
	}

	return status;
}

static void do_rx_completion(struct htc_endpoint *endpoint,
			     struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc calling ep %d recv callback on packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

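/*
 * Fetch several receive packets in a single HIF scatter request. Entries
 * are added until the bundle buffer space runs out; packets pulled as
 * part of a bundle (or of a partial bundle) have their lookaheads
 * ignored, since only the final packet's lookahead is meaningful.
 */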
static int htc_issue_rxpkt_bundle(struct htc_target *target,
				  struct list_head *rxq,
				  struct list_head *sync_compq,
				  int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
			    get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

static int htc_proc_fetched_rxpkts(struct htc_target *target,
				   struct list_head *comp_pktq, u32 lk_ahds[],
				   int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		list_del(&packet->list);
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
		if (status)
			return status;

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				set_rxpkt_indication_flag(lk_ahds[0],
							  ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		htc_update_rx_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		do_rx_completion(ep, packet);
	}

	return status;
}

static int htc_fetch_rxpkts(struct htc_target *target,
			    struct list_head *rx_pktq,
			    struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = htc_issue_rxpkt_bundle(target, rx_pktq,
							comp_pktq,
							&fetched_pkts,
							part_bundle);
			if (status)
				return status;

			if (!list_empty(rx_pktq))
				part_bundle = true;
		}

		if (!fetched_pkts) {
			struct htc_packet *packet;

			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			list_del(&packet->list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_empty(rx_pktq))
				/*
				 * look_aheads in all packets
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = dev_rx_pkt(target, packet, packet->act_len);
			if (status)
				return status;

			list_add_tail(&packet->list, comp_pktq);
		}
	}

	return status;
}

int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead[], int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing
	 */
	memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));

	while (true) {

		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
		endpoint = &target->endpoint[id];

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets indicated by the
		 * look_aheads.
		 */
		status = alloc_and_prep_rxpkts(target, look_aheads,
					       num_look_ahead, endpoint,
					       &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected, force IRQ status
			 * re-check again
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);

		if (!status)
			chk_rx_water_mark(endpoint);

		/* Process fetched packets */
		status = htc_proc_fetched_rxpkts(target, &comp_pktq,
						 look_aheads, &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);
		/*
		 * Cleanup any packets we allocated but didn't use to
		 * actually fetch any packets.
		 */
		list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kldev_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kldev_rx_control(target->dev, false);
	}
	*num_pkts = n_fetched;

	return status;
}

/*
 * Synchronously wait for a control message from the target.
 * This function is used at initialization time ONLY. At init,
 * messages on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
				      HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_wait_for_ctrl_msg: look_ahead: 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (dev_rx_pkt(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process the receive header */
	packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}

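/*
 * Illustrative sketch (an assumption, not driver code): the polled
 * look-ahead above is just the first word of the HTC frame header, so
 * it can be inspected in place before the full message is fetched:
 *
 *	u32 look_ahead;		// filled in by the mbox poll
 *	struct htc_frame_hdr *hdr = (struct htc_frame_hdr *)&look_ahead;
 *	int full_len = le16_to_cpu(hdr->payld_len) + HTC_HDR_LENGTH;
 */
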
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -EINVAL;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through the queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			do_rx_completion(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "receiver was blocked on ep:%d, unblocking.\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kldev_rx_control(target->dev, true);

	return status;
}

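/*
 * Example usage (an illustrative sketch; set_htc_rxpkt_info() is
 * assumed to be the packet-init helper from htc.h, and the buffer,
 * length and endpoint values are hypothetical):
 *
 *	LIST_HEAD(queue);
 *
 *	set_htc_rxpkt_info(packet, packet, buf, buf_len, endpoint);
 *	list_add_tail(&packet->list, &queue);
 *	status = ath6kl_htc_add_rxbuf_multiple(target, &queue);
 */
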
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			/* drop the lock while freeing the skb */
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}

int ath6kl_htc_conn_service(struct htc_target *target,
			    struct htc_service_connect_req *conn_req,
			    struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc_conn_service, target:0x%p service id:0x%X\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
		    || (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		/* endpoint is already in use */
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return the assigned endpoint to the caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_rsvd = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override the cred_per_msg calculation; this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}

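/*
 * Example (an illustrative sketch, error handling omitted): connecting
 * a service once the target is ready; the service id and callback
 * names are assumptions:
 *
 *	struct htc_service_connect_req req;
 *	struct htc_service_connect_resp resp;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&resp, 0, sizeof(resp));
 *	req.svc_id = WMI_CONTROL_SVC;	// hypothetical choice
 *	req.ep_cb.rx = my_rx_handler;	// hypothetical callback
 *	req.max_txq_depth = 32;
 *
 *	status = ath6kl_htc_conn_service(target, &req, &resp);
 *	// on success, resp.endpoint holds the assigned endpoint id
 */
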
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0,
		       sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset the distribution list */
	INIT_LIST_HEAD(&target->cred_dist_list);
}

int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);
	return num;
}

static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc bundling allowed. max msg per htc bundle: %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is not
		 * aligned to a block size; the I/O block padding will
		 * spill into the next credit buffer, which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}

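/*
 * Worked example for the alignment check above (the numbers are
 * hypothetical): with block_sz = 128 and tgt_cred_sz = 1376,
 * 1376 % 128 == 96, so block padding of a full credit would spill into
 * the next credit buffer and tx bundling is disabled; a credit size of
 * 1408 (11 * 128) would keep it enabled.
 */
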
int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "target ready: credits: %d credit size: %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect the fake service */
	status = ath6kl_htc_conn_service(target, &connect, &resp);

	if (status)
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}

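/*
 * Layout note (a summary of the checks above): a 2.0 target sends only
 * struct htc_ready_msg, while a >= 2.1 target appends the extended
 * fields (htc_ver, msg_per_htc_bndl); that is why the act_len checks
 * distinguish sizeof(struct htc_ready_msg) from
 * sizeof(struct htc_ready_ext_msg).
 */
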
/*
 * Start HTC, enable interrupts and let the target know
 * the host has finished setup.
 */
int ath6kl_htc_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	/* Disable interrupts at the chip level */
	ath6kldev_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into the htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
			  target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate setup completion to the target */
	status = htc_setup_tx_complete(target);

	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kldev_unmask_intrs(target->dev);

	if (status)
		ath6kl_htc_stop(target);

	return status;
}

/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns, all pending HIF I/O has completed and we
	 * can safely flush the queues.
	 */
	ath6kldev_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_flush_rx_buf(target);

	reset_ep_state(target);
}

void *ath6kl_htc_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	struct htc_packet *packet;
	int status = 0, i = 0;
	u32 block_size, ctrl_bufsz;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_create_htc;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	reset_ep_state(target);

	status = ath6kldev_setup(target->dev);

	if (status)
		goto fail_create_htc;

	block_size = ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			break;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			break;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
	}

fail_create_htc:
	if (i != NUM_CONTROL_BUFFERS || status) {
		if (target) {
			ath6kl_htc_cleanup(target);
			target = NULL;
		}
	}

	return target;
}

/* cleanup the HTC instance */
void ath6kl_htc_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}
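
/*
 * Typical bring-up order (an illustrative sketch; error handling
 * omitted, all of these calls are defined in this file):
 *
 *	target = ath6kl_htc_create(ar);
 *	status = ath6kl_htc_wait_target(target);
 *	status = ath6kl_htc_conn_service(target, &connect, &resp);
 *	status = ath6kl_htc_start(target);
 *	...
 *	ath6kl_htc_stop(target);
 *	ath6kl_htc_cleanup(target);
 */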