/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc_hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))

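/*
 * Move the buffer pointer back to the nearest 4-byte boundary. The caller
 * must have headroom in front of *buf, since the payload is shifted down
 * with memmove().
 */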
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

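/*
 * Prepend the HTC frame header to the packet's payload and fill in the
 * length, flags, endpoint and the two per-message control bytes.
 */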
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

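/*
 * Grab a free control packet from the TX or RX control buffer pool. For a
 * TX buffer, reserve room at the front for the HTC frame header. Returns
 * NULL when the pool is empty.
 */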
static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

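/*
 * Undo the HTC header adjustment on completion and, if the send failed,
 * return the credits consumed by this packet to the distribution list so
 * they can be handed out again.
 */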
static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
		   target->cred_dist_cntxt, &target->cred_dist_list);

	ath6k_credit_distribute(target->cred_dist_cntxt,
				&target->cred_dist_list,
				HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "send complete ep %d, (%d pkts)\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);
	/* do completion */
	htc_tx_complete(endpoint, &container);
}

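/*
 * Completion handler for a bundled (scatter/gather) send: update credit
 * accounting for every packet in the scatter list, hand the scatter
 * request back to the HIF layer and then complete all packets in one go.
 */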
static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_async_tx_scat_complete total len: %d entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}

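/*
 * Write a single, already prepared packet to the target's mailbox. The
 * transfer length is padded up to the mailbox block size; with no
 * completion callback set, the write is issued synchronously, otherwise
 * it goes through the async HIF path and completes via
 * htc_tx_comp_handler().
 */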
static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
		   __func__, send_len, sync ? "sync" : "async");

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
		   padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
					     target->dev->ar->mbox_info.htc_addr,
					     packet->buf, padded_len,
					     HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
					 target->dev->ar->mbox_info.htc_addr,
					 packet->buf, padded_len,
					 HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}

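/*
 * Check whether the endpoint holds enough TX credits for a message of the
 * given length, seeking more from the credit distributor when it does
 * not. On success the required credits are deducted; when the endpoint
 * then drops below its per-message threshold, HTC_FLAGS_NEED_CREDIT_UPDATE
 * is set so the target reports credits back as soon as possible.
 */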
static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{

	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &ep->cred_dist);

		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "not enough credits for ep %d - leaving packet in queue\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &ep->cred_dist);

		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
		}
	}

	return 0;
}

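/*
 * Move as many packets as the available credits allow from the endpoint's
 * TX queue onto the caller's queue, stamping each one with its credit
 * cost, send flags, sequence number and the async completion handler.
 */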
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {

		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "got head pkt:0x%p , queue depth: %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit; this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}

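/*
 * Fill a HIF scatter request with up to n_scat packets taken from the
 * queue. Each packet is credit-padded, gets its HTC header prepended with
 * HTC_FLAGS_SEND_BUNDLE set and is aligned to a 4-byte boundary. If fewer
 * than HTC_MIN_HTC_MSGS_TO_BUNDLE entries end up in the list, the setup
 * is rolled back and -EAGAIN tells the caller to send them individually.
 */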
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		ath6kl_htc_tx_prep_pkt(packet,
				packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
				cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
			   i, packet, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "no more scatter resources\n");
			break;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "send scatter total bytes: %d , entries: %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kldev_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
		   __func__, n_sent_bundle);

	return;
}

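/*
 * Service an endpoint's TX queue. The tx_proc_cnt guard ensures only one
 * context drains the queue at a time; packets are pulled off with
 * ath6kl_htc_tx_pkts_get() and sent as scatter bundles when possible,
 * falling back to individual ath6kl_htc_tx_issue() calls otherwise.
 */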
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_enable) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				ath6kl_htc_tx_bundle(endpoint, &txq,
						     &temp1, &temp2);
				bundle_sent += temp1;
				n_pkts_bundle += temp2;
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			ath6kl_htc_tx_issue(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

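/*
 * Queue a packet on the endpoint and kick off transmission. When the
 * queue is already at its limit, the endpoint's tx_full callback decides
 * whether the packet is dropped (returns false) or queued anyway.
 */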
static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
			   endpoint->eid, overflow, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "indicating overflowed tx packet: 0x%p\n", tx_pkt);

		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "ep %d has %d credits and %d packets in tx queue\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

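/*
 * Send the HTC "setup complete" control message to the target. HTC 2.1
 * and later targets get the extended message, which can also enable RX
 * bundling; older targets get the plain one. The send is synchronous.
 */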
static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
				target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

void ath6kl_htc_set_credit_dist(struct htc_target *target,
				struct htc_credit_state_info *cred_dist_cntxt,
				u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->cred_dist_cntxt = cred_dist_cntxt;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

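/*
 * Main HTC TX entry point. The packet is handed to ath6kl_htc_tx_try();
 * if it cannot be queued (dropped on overflow, or HTC is stopping) it is
 * completed immediately with -ECANCELED or -ENOSPC.
 */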
int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
void ath6kl_htc_flush_txep(struct htc_target *target,
			   enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}

}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

void ath6kl_htc_indicate_activity_change(struct htc_target *target,
					 enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

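/*
 * Synchronously read one packet of rx_len bytes from the target's
 * mailbox into packet->buf, after checking that the block-padded length
 * still fits in the receive buffer.
 */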
static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr, "sync");

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
				  HTC_HDR_LENGTH;
	}

	return status;
}

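/*
 * Allocate receive buffers for all messages announced by the pending
 * look-ahead(s): validate each look-ahead header, work out how many
 * messages a bundle carries and let ath6kl_htc_rx_setup() stage buffers
 * for them. On failure every staged buffer is reclaimed.
 */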
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
				"Unexpected ENDPOINT 0 Message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

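/*
 * Process a credit report carried in an RX trailer or an endpoint 0 NULL
 * message: credit the reported endpoints, track where the report came
 * from in the endpoint stats, redistribute any reclaimable credits and
 * finally restart stalled TX queues.
 */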
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint,
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "report indicated %d credits to distribute\n",
		   tot_credits);

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operation. Note, this is done with the lock held.
		 */
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
					"", record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;

}

static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
			buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
				"", orig_buf, orig_len);

	return status;
}

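/*
 * Validate and strip the HTC header of a received packet: refresh the
 * actual length for packets fetched as part of a bundle, verify the
 * header against the expected look-ahead and, if a trailer is present,
 * parse it for credit reports and the next look-ahead(s).
 */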
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
			packet->buf, packet->act_len);

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
				"", packet->buf,
				packet->act_len < 256 ? packet->act_len : 256);
	else {
		if (packet->act_len > 0)
			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
					"HTC - Application Msg", "",
					packet->buf, packet->act_len);
	}

	return status;
}

static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc calling ep %d recv callback on packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

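/*
 * Fetch a group of packets from the mailbox in a single scatter/gather
 * read. Packets are packed into the scatter request until the maximum
 * bundle size would be exceeded; all but the last packet of a full
 * bundle have their look-aheads ignored.
 */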
static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "%s(): (numpackets: %d , actual : %d)\n",
		   __func__, get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

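/*
 * Walk the list of fetched packets, process each HTC header (which may
 * yield the next look-ahead), set the "more packets" indication flags
 * and hand every packet to its endpoint's RX callback.
 */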
static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

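/*
 * Fetch every packet on rx_pktq from the mailbox, using bundled
 * scatter/gather reads when RX bundling is enabled and more than one
 * packet is pending, and synchronous single-packet reads otherwise.
 * Successfully fetched packets are moved onto comp_pktq; on error all
 * staged packets are reclaimed.
 */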
static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
			 */
			status = ath6kl_htc_rx_bundle(target, rx_pktq,
						      &tmp_rxq,
						      &fetched_pkts,
						      part_bundle);
			if (status)
				goto fail_rx;

			if (!list_empty(rx_pktq))
				part_bundle = true;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}

		if (!fetched_pkts) {

			packet = list_first_entry(rx_pktq, struct htc_packet,
						  list);

			/* fully synchronous */
			packet->completion = NULL;

			if (!list_is_singular(rx_pktq))
				/*
				 * look_aheads in all packets
				 * except the last one in the
				 * bundle must be ignored
				 */
				packet->info.rx.rx_flags |=
					HTC_RX_PKT_IGNORE_LOOKAHEAD;

			/* go fetch the packet */
			status = ath6kl_htc_rx_packet(target, packet,
						      packet->act_len);

			list_move_tail(&packet->list, &tmp_rxq);

			if (status)
				goto fail_rx;

			list_splice_tail_init(&tmp_rxq, comp_pktq);
		}
	}

	return 0;

fail_rx:

	/*
	 * Clean up any packets we allocated but did not end up
	 * using to fetch any data.
	 */

	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
		list_del(&packet->list);
		htc_reclaim_rxbuf(target, packet,
				  &target->endpoint[packet->endpoint]);
	}

	return status;
}

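/*
 * Process pending receive messages reported by the target. The
 * caller-supplied look-ahead words seed the loop: packets are
 * allocated to match the look-aheads, fetched (bundled where
 * possible) and their headers processed, which may yield fresh
 * look-aheads and another pass. The number of packets fetched is
 * returned through *num_pkts.
 */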
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead[], int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing
	 */
	memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));

	while (true) {

		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		endpoint = &target->endpoint[id];

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets as indicated
		 * by the look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected; force an IRQ
			 * status re-check
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing; this can net better
		 * performance in high throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop, blocking receiver for htc_stop\n");
			ath6kldev_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers, blocking receiver to prevent overrun\n");
		ath6kldev_rx_control(target->dev, false);
	}

	*num_pkts = n_fetched;

	return status;
}

/*
 * Synchronously wait for a control message from the target.
 * This function is used at initialization time ONLY. At init,
 * messages on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
				      HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device; this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}

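/*
 * Queue a list of caller-supplied receive buffers on an endpoint's
 * rx_bufq. All packets in pkt_queue are assumed to belong to the same
 * endpoint as the first entry. If HTC is stopping, the buffers are
 * completed immediately with -ECANCELED; otherwise, if the receiver
 * was blocked waiting for buffers on this endpoint, it is unblocked.
 */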
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "receiver was blocked on ep:%d, unblocking.\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO: implement a buffer threshold count? */
		ath6kldev_rx_control(target->dev, true);

	return status;
}

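/*
 * Free every receive buffer still queued on an in-use endpoint. The
 * buffers are backed by skbs stored in pkt_cntxt, so each is released
 * with dev_kfree_skb(); rx_lock is dropped around each free, presumably
 * so the skb is not freed while the spinlock is held.
 */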
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}

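/*
 * Connect a service on the target and set up the matching host-side
 * endpoint state. The pseudo control service (HTC_CTRL_RSVD_SVC) maps
 * to ENDPOINT_0 directly; for any other service a connect message is
 * sent to the target and the response supplies the assigned endpoint
 * and maximum message size.
 *
 * Minimal caller sketch (illustrative only: the service id, callback
 * and queue depth below are assumptions, not taken from this file):
 *
 *	struct htc_service_connect_req req;
 *	struct htc_service_connect_resp resp;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&resp, 0, sizeof(resp));
 *	req.svc_id = WMI_CONTROL_SVC;
 *	req.ep_cb.rx = my_rx_handler;
 *	req.max_txq_depth = 32;
 *	if (ath6kl_htc_conn_service(target, &req, &resp))
 *		goto err;
 *
 * On success resp.endpoint holds the assigned endpoint id and
 * resp.len_max the maximum message size for that endpoint.
 */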
int ath6kl_htc_conn_service(struct htc_target *target,
			    struct htc_service_connect_req *conn_req,
			    struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc_conn_service, target:0x%p service id:0x%X\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
		    || (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_rsvd = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override cred_per_msg calculation; this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the Send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}

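/*
 * Reset all endpoint state: clear the credit distribution state,
 * service ids, queues and statistics, and reinitialize the global
 * credit distribution list. This marks every endpoint unused
 * (svc_id == 0).
 */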
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0,
		       sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	INIT_LIST_HEAD(&target->cred_dist_list);
}

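/* Return the number of receive buffers queued on an endpoint. */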
int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);
	return num;
}

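/*
 * Negotiate the message bundling limits: the target's advertised
 * messages-per-bundle is clamped first to what HTC supports and then
 * to what the HIF scatter implementation supports. Send bundling is
 * additionally disabled when the credit size is not a multiple of the
 * block size; e.g. (values illustrative) a 256-byte credit with a
 * 128-byte block size keeps tx bundling, while a 300-byte credit with
 * the same block size would disable it, since 300 % 128 != 0.
 */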
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc bundling allowed. max msg per htc bundle: %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is
		 * not aligned to a block size; the I/O block
		 * padding would spill into the next credit buffer,
		 * which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}

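/*
 * Wait for the target's ready report on ENDPOINT 0, record the
 * advertised credit count and size, configure bundling when the
 * extended ready message allows it, and finally connect the pseudo
 * control service. This runs at init, before any other service is
 * connected.
 */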
int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation, so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "target ready: credits: %d credit size: %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_conn_service(target, &connect, &resp);

	if (status)
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}

/*
 * Start HTC, enable interrupts and let the target know
 * the host has finished its setup.
 */
int ath6kl_htc_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	/* Disable interrupts at the chip level */
	ath6kldev_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into the htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
			  target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate setup completion to the target */
	status = htc_setup_tx_complete(target);

	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kldev_unmask_intrs(target->dev);

	if (status)
		ath6kl_htc_stop(target);

	return status;
}

/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns, all pending HIF I/O has completed and we
	 * can safely flush the queues.
	 */
	ath6kldev_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_flush_rx_buf(target);

	reset_ep_state(target);
}

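/*
 * Allocate and initialize an HTC target instance: locks, free buffer
 * lists, the underlying device layer, and NUM_CONTROL_BUFFERS control
 * packets (the first NUM_CONTROL_RX_BUFFERS go on the rx free list,
 * the rest on the tx free list). Returns NULL on any failure, after
 * cleaning up whatever was allocated.
 */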
void *ath6kl_htc_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	struct htc_packet *packet;
	int status = 0, i = 0;
	u32 block_size, ctrl_bufsz;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_create_htc;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	reset_ep_state(target);

	status = ath6kldev_setup(target->dev);

	if (status)
		goto fail_create_htc;

	block_size = ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		     (block_size + HTC_HDR_LENGTH) :
		     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			break;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			break;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
	}

fail_create_htc:
	if (i != NUM_CONTROL_BUFFERS || status) {
		if (target) {
			ath6kl_htc_cleanup(target);
			target = NULL;
		}
	}

	return target;
}

/* cleanup the HTC instance */
void ath6kl_htc_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}