blob: 5b2ae14a94591705c8780e2ebde5f25d3ff222ee [file] [log] [blame]
Amir Levy9659e592016-10-27 18:08:27 +03001/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/delay.h>
14#include <linux/device.h>
15#include <linux/dmapool.h>
16#include <linux/list.h>
17#include <linux/netdevice.h>
18#include "ipa_i.h"
19#include "ipa_trace.h"
20
21#define IPA_LAST_DESC_CNT 0xFFFF
22#define POLLING_INACTIVITY_RX 40
23#define POLLING_INACTIVITY_TX 40
24#define POLLING_MIN_SLEEP_TX 400
25#define POLLING_MAX_SLEEP_TX 500
26/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
27#define IPA_MTU 1500
28#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
29#define IPA_GENERIC_AGGR_TIME_LIMIT 1
30#define IPA_GENERIC_AGGR_PKT_LIMIT 0
31
32#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
33#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
34 (X) + NET_SKB_PAD) +\
35 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
36#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
37 (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
38#define IPA_GENERIC_RX_BUFF_LIMIT (\
39 IPA_REAL_GENERIC_RX_BUFF_SZ(\
40 IPA_GENERIC_RX_BUFF_BASE_SZ) -\
41 IPA_GENERIC_RX_BUFF_BASE_SZ)
42
43#define IPA_RX_BUFF_CLIENT_HEADROOM 256
44
45/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
46#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
47
48#define IPA_WLAN_RX_POOL_SZ 100
49#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
50#define IPA_WLAN_RX_BUFF_SZ 2048
51#define IPA_WLAN_COMM_RX_POOL_LOW 100
52#define IPA_WLAN_COMM_RX_POOL_HIGH 900
53
54#define IPA_ODU_RX_BUFF_SZ 2048
55#define IPA_ODU_RX_POOL_SZ 32
56#define IPA_SIZE_DL_CSUM_META_TRAILER 8
57
58#define IPA_HEADROOM 128
59
60static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags);
61static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys);
62static void ipa_replenish_rx_cache(struct ipa_sys_context *sys);
63static void replenish_rx_work_func(struct work_struct *work);
64static void ipa_wq_handle_rx(struct work_struct *work);
65static void ipa_wq_handle_tx(struct work_struct *work);
66static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size);
67static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys,
68 u32 size);
69static int ipa_assign_policy(struct ipa_sys_connect_params *in,
70 struct ipa_sys_context *sys);
71static void ipa_cleanup_rx(struct ipa_sys_context *sys);
72static void ipa_wq_rx_avail(struct work_struct *work);
73static void ipa_alloc_wlan_rx_common_cache(u32 size);
74static void ipa_cleanup_wlan_rx_common_cache(void);
75static void ipa_wq_repl_rx(struct work_struct *work);
76static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
77 struct sps_iovec *iovec);
78
79static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
80static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
81
82static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
83{
84 struct ipa_tx_pkt_wrapper *tx_pkt_expected;
85 int i;
86
87 for (i = 0; i < cnt; i++) {
88 spin_lock_bh(&sys->spinlock);
89 if (unlikely(list_empty(&sys->head_desc_list))) {
90 spin_unlock_bh(&sys->spinlock);
91 return;
92 }
93 tx_pkt_expected = list_first_entry(&sys->head_desc_list,
94 struct ipa_tx_pkt_wrapper,
95 link);
96 list_del(&tx_pkt_expected->link);
97 sys->len--;
98 spin_unlock_bh(&sys->spinlock);
99 if (!tx_pkt_expected->no_unmap_dma) {
100 if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
101 dma_unmap_single(ipa_ctx->pdev,
102 tx_pkt_expected->mem.phys_base,
103 tx_pkt_expected->mem.size,
104 DMA_TO_DEVICE);
105 } else {
106 dma_unmap_page(ipa_ctx->pdev,
107 tx_pkt_expected->mem.phys_base,
108 tx_pkt_expected->mem.size,
109 DMA_TO_DEVICE);
110 }
111 }
112 if (tx_pkt_expected->callback)
113 tx_pkt_expected->callback(tx_pkt_expected->user1,
114 tx_pkt_expected->user2);
115 if (tx_pkt_expected->cnt > 1 &&
116 tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) {
117 if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) {
118 dma_pool_free(ipa_ctx->dma_pool,
119 tx_pkt_expected->mult.base,
120 tx_pkt_expected->mult.phys_base);
121 } else {
122 dma_unmap_single(ipa_ctx->pdev,
123 tx_pkt_expected->mult.phys_base,
124 tx_pkt_expected->mult.size,
125 DMA_TO_DEVICE);
126 kfree(tx_pkt_expected->mult.base);
127 }
128 }
129 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected);
130 }
131}
132
133static void ipa_wq_write_done_status(int src_pipe)
134{
135 struct ipa_tx_pkt_wrapper *tx_pkt_expected;
136 struct ipa_sys_context *sys;
137 u32 cnt;
138
139 WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes);
140
141 if (!ipa_ctx->ep[src_pipe].status.status_en)
142 return;
143
144 sys = ipa_ctx->ep[src_pipe].sys;
145 if (!sys)
146 return;
147
148 spin_lock_bh(&sys->spinlock);
149 if (unlikely(list_empty(&sys->head_desc_list))) {
150 spin_unlock_bh(&sys->spinlock);
151 return;
152 }
153 tx_pkt_expected = list_first_entry(&sys->head_desc_list,
154 struct ipa_tx_pkt_wrapper,
155 link);
156 cnt = tx_pkt_expected->cnt;
157 spin_unlock_bh(&sys->spinlock);
158 ipa_wq_write_done_common(sys, cnt);
159}
160
161/**
162 * ipa_write_done() - this function will be (eventually) called when a Tx
163 * operation is complete
164 * * @work: work_struct used by the work queue
165 *
166 * Will be called in deferred context.
167 * - invoke the callback supplied by the client who sent this command
168 * - iterate over all packets and validate that
169 * the order for sent packet is the same as expected
170 * - delete all the tx packet descriptors from the system
171 * pipe context (not needed anymore)
172 * - return the tx buffer back to dma_pool
173 */
174static void ipa_wq_write_done(struct work_struct *work)
175{
176 struct ipa_tx_pkt_wrapper *tx_pkt;
177 u32 cnt;
178 struct ipa_sys_context *sys;
179
180 tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
181 cnt = tx_pkt->cnt;
182 sys = tx_pkt->sys;
183
184 ipa_wq_write_done_common(sys, cnt);
185}
186
187static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
188 bool in_poll_state)
189{
190 struct sps_iovec iov;
191 int ret;
192 int cnt = 0;
193
194 while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
195 !atomic_read(&sys->curr_polling_state))) {
196 if (cnt && !process_all)
197 break;
198 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
199 if (ret) {
200 IPAERR("sps_get_iovec failed %d\n", ret);
201 break;
202 }
203
204 if (iov.addr == 0)
205 break;
206
207 ipa_wq_write_done_common(sys, 1);
208 cnt++;
209 };
210
211 return cnt;
212}
213
214/**
215 * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
216 */
217static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
218{
219 int ret;
220
221 if (!atomic_read(&sys->curr_polling_state)) {
222 IPAERR("already in intr mode\n");
223 goto fail;
224 }
225
226 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
227 if (ret) {
228 IPAERR("sps_get_config() failed %d\n", ret);
229 goto fail;
230 }
231 sys->event.options = SPS_O_EOT;
232 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
233 if (ret) {
234 IPAERR("sps_register_event() failed %d\n", ret);
235 goto fail;
236 }
237 sys->ep->connect.options =
238 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
239 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
240 if (ret) {
241 IPAERR("sps_set_config() failed %d\n", ret);
242 goto fail;
243 }
244 atomic_set(&sys->curr_polling_state, 0);
245 ipa_handle_tx_core(sys, true, false);
246 return;
247
248fail:
249 queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
250 msecs_to_jiffies(1));
251}
252
253static void ipa_handle_tx(struct ipa_sys_context *sys)
254{
255 int inactive_cycles = 0;
256 int cnt;
257
258 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
259 do {
260 cnt = ipa_handle_tx_core(sys, true, true);
261 if (cnt == 0) {
262 inactive_cycles++;
263 usleep_range(POLLING_MIN_SLEEP_TX,
264 POLLING_MAX_SLEEP_TX);
265 } else {
266 inactive_cycles = 0;
267 }
268 } while (inactive_cycles <= POLLING_INACTIVITY_TX);
269
270 ipa_tx_switch_to_intr_mode(sys);
271 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
272}
273
274static void ipa_wq_handle_tx(struct work_struct *work)
275{
276 struct ipa_sys_context *sys;
277
278 sys = container_of(work, struct ipa_sys_context, work);
279
280 ipa_handle_tx(sys);
281}
282
283/**
284 * ipa_send_one() - Send a single descriptor
285 * @sys: system pipe context
286 * @desc: descriptor to send
287 * @in_atomic: whether caller is in atomic context
288 *
289 * - Allocate tx_packet wrapper
290 * - transfer data to the IPA
291 * - after the transfer was done the SPS will
292 * notify the sending user via ipa_sps_irq_comp_tx()
293 *
294 * Return codes: 0: success, -EFAULT: failure
295 */
296int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
297 bool in_atomic)
298{
299 struct ipa_tx_pkt_wrapper *tx_pkt;
300 int result;
301 u16 sps_flags = SPS_IOVEC_FLAG_EOT;
302 dma_addr_t dma_address;
303 u16 len;
304 u32 mem_flag = GFP_ATOMIC;
305 struct sps_iovec iov;
306 int ret;
307
308 if (unlikely(!in_atomic))
309 mem_flag = GFP_KERNEL;
310
311 tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
312 if (!tx_pkt) {
313 IPAERR("failed to alloc tx wrapper\n");
314 goto fail_mem_alloc;
315 }
316
317 if (!desc->dma_address_valid) {
318 dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
319 desc->len, DMA_TO_DEVICE);
320 } else {
321 dma_address = desc->dma_address;
322 tx_pkt->no_unmap_dma = true;
323 }
324 if (!dma_address) {
325 IPAERR("failed to DMA wrap\n");
326 goto fail_dma_map;
327 }
328
329 INIT_LIST_HEAD(&tx_pkt->link);
330 tx_pkt->type = desc->type;
331 tx_pkt->cnt = 1; /* only 1 desc in this "set" */
332
333 tx_pkt->mem.phys_base = dma_address;
334 tx_pkt->mem.base = desc->pyld;
335 tx_pkt->mem.size = desc->len;
336 tx_pkt->sys = sys;
337 tx_pkt->callback = desc->callback;
338 tx_pkt->user1 = desc->user1;
339 tx_pkt->user2 = desc->user2;
340
341 /*
342 * Special treatment for immediate commands, where the structure of the
343 * descriptor is different
344 */
345 if (desc->type == IPA_IMM_CMD_DESC) {
346 sps_flags |= SPS_IOVEC_FLAG_IMME;
347 len = desc->opcode;
348 IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
349 desc->opcode, desc->len, sps_flags);
350 IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
351 } else {
352 len = desc->len;
353 }
354
355 INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
356
357 spin_lock_bh(&sys->spinlock);
358 list_add_tail(&tx_pkt->link, &sys->head_desc_list);
359 if (sys->policy == IPA_POLICY_NOINTR_MODE) {
360 do {
361 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
362 if (ret) {
363 IPADBG("sps_get_iovec failed %d\n", ret);
364 break;
365 }
366 if ((iov.addr == 0x0) && (iov.size == 0x0))
367 break;
368 } while (1);
369 }
370 result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
371 sps_flags);
372 if (result) {
373 IPAERR("sps_transfer_one failed rc=%d\n", result);
374 goto fail_sps_send;
375 }
376
377 spin_unlock_bh(&sys->spinlock);
378
379 return 0;
380
381fail_sps_send:
382 list_del(&tx_pkt->link);
383 spin_unlock_bh(&sys->spinlock);
384 dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
385fail_dma_map:
386 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
387fail_mem_alloc:
388 return -EFAULT;
389}
390
391/**
392 * ipa_send() - Send multiple descriptors in one HW transaction
393 * @sys: system pipe context
394 * @num_desc: number of packets
395 * @desc: packets to send (may be immediate command or data)
396 * @in_atomic: whether caller is in atomic context
397 *
398 * This function is used for system-to-bam connection.
399 * - SPS driver expect struct sps_transfer which will contain all the data
400 * for a transaction
401 * - ipa_tx_pkt_wrapper will be used for each ipa
402 * descriptor (allocated from wrappers cache)
403 * - The wrapper struct will be configured for each ipa-desc payload and will
404 * contain information which will be later used by the user callbacks
405 * - each transfer will be made by calling to sps_transfer()
406 * - Each packet (command or data) that will be sent will also be saved in
407 * ipa_sys_context for later check that all data was sent
408 *
409 * Return codes: 0: success, -EFAULT: failure
410 */
411int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
412 bool in_atomic)
413{
414 struct ipa_tx_pkt_wrapper *tx_pkt;
415 struct ipa_tx_pkt_wrapper *next_pkt;
416 struct sps_transfer transfer = { 0 };
417 struct sps_iovec *iovec;
418 dma_addr_t dma_addr;
419 int i = 0;
420 int j;
421 int result;
422 int fail_dma_wrap = 0;
423 uint size = num_desc * sizeof(struct sps_iovec);
424 u32 mem_flag = GFP_ATOMIC;
425 struct sps_iovec iov;
426 int ret;
427
428 if (unlikely(!in_atomic))
429 mem_flag = GFP_KERNEL;
430
431 if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
432 transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
433 &dma_addr);
434 if (!transfer.iovec) {
435 IPAERR("fail to alloc dma mem for sps xfr buff\n");
436 return -EFAULT;
437 }
438 } else {
439 transfer.iovec = kmalloc(size, mem_flag);
440 if (!transfer.iovec) {
441 IPAERR("fail to alloc mem for sps xfr buff ");
442 IPAERR("num_desc = %d size = %d\n", num_desc, size);
443 return -EFAULT;
444 }
445 dma_addr = dma_map_single(ipa_ctx->pdev,
446 transfer.iovec, size, DMA_TO_DEVICE);
447 if (!dma_addr) {
448 IPAERR("dma_map_single failed for sps xfr buff\n");
449 kfree(transfer.iovec);
450 return -EFAULT;
451 }
452 }
453
454 transfer.iovec_phys = dma_addr;
455 transfer.iovec_count = num_desc;
456 spin_lock_bh(&sys->spinlock);
457
458 for (i = 0; i < num_desc; i++) {
459 fail_dma_wrap = 0;
460 tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
461 mem_flag);
462 if (!tx_pkt) {
463 IPAERR("failed to alloc tx wrapper\n");
464 goto failure;
465 }
466 /*
467 * first desc of set is "special" as it holds the count and
468 * other info
469 */
470 if (i == 0) {
471 transfer.user = tx_pkt;
472 tx_pkt->mult.phys_base = dma_addr;
473 tx_pkt->mult.base = transfer.iovec;
474 tx_pkt->mult.size = size;
475 tx_pkt->cnt = num_desc;
476 INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
477 }
478
479 iovec = &transfer.iovec[i];
480 iovec->flags = 0;
481
482 INIT_LIST_HEAD(&tx_pkt->link);
483 tx_pkt->type = desc[i].type;
484
485 if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
486 tx_pkt->mem.base = desc[i].pyld;
487 tx_pkt->mem.size = desc[i].len;
488
489 if (!desc[i].dma_address_valid) {
490 tx_pkt->mem.phys_base =
491 dma_map_single(ipa_ctx->pdev,
492 tx_pkt->mem.base,
493 tx_pkt->mem.size,
494 DMA_TO_DEVICE);
495 } else {
496 tx_pkt->mem.phys_base = desc[i].dma_address;
497 tx_pkt->no_unmap_dma = true;
498 }
499 } else {
500 tx_pkt->mem.base = desc[i].frag;
501 tx_pkt->mem.size = desc[i].len;
502
503 if (!desc[i].dma_address_valid) {
504 tx_pkt->mem.phys_base =
505 skb_frag_dma_map(ipa_ctx->pdev,
506 desc[i].frag,
507 0, tx_pkt->mem.size,
508 DMA_TO_DEVICE);
509 } else {
510 tx_pkt->mem.phys_base = desc[i].dma_address;
511 tx_pkt->no_unmap_dma = true;
512 }
513 }
514
515 if (!tx_pkt->mem.phys_base) {
516 IPAERR("failed to alloc tx wrapper\n");
517 fail_dma_wrap = 1;
518 goto failure;
519 }
520
521 tx_pkt->sys = sys;
522 tx_pkt->callback = desc[i].callback;
523 tx_pkt->user1 = desc[i].user1;
524 tx_pkt->user2 = desc[i].user2;
525
526 /*
527 * Point the iovec to the buffer and
528 * add this packet to system pipe context.
529 */
530 iovec->addr = tx_pkt->mem.phys_base;
531 list_add_tail(&tx_pkt->link, &sys->head_desc_list);
532
533 /*
534 * Special treatment for immediate commands, where the structure
535 * of the descriptor is different
536 */
537 if (desc[i].type == IPA_IMM_CMD_DESC) {
538 iovec->size = desc[i].opcode;
539 iovec->flags |= SPS_IOVEC_FLAG_IMME;
540 IPA_DUMP_BUFF(desc[i].pyld,
541 tx_pkt->mem.phys_base, desc[i].len);
542 } else {
543 iovec->size = desc[i].len;
544 }
545
546 if (i == (num_desc - 1)) {
547 iovec->flags |= SPS_IOVEC_FLAG_EOT;
548 /* "mark" the last desc */
549 tx_pkt->cnt = IPA_LAST_DESC_CNT;
550 }
551 }
552
553 if (sys->policy == IPA_POLICY_NOINTR_MODE) {
554 do {
555 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
556 if (ret) {
557 IPADBG("sps_get_iovec failed %d\n", ret);
558 break;
559 }
560 if ((iov.addr == 0x0) && (iov.size == 0x0))
561 break;
562 } while (1);
563 }
564 result = sps_transfer(sys->ep->ep_hdl, &transfer);
565 if (result) {
566 IPAERR("sps_transfer failed rc=%d\n", result);
567 goto failure;
568 }
569
570 spin_unlock_bh(&sys->spinlock);
571 return 0;
572
573failure:
574 tx_pkt = transfer.user;
575 for (j = 0; j < i; j++) {
576 next_pkt = list_next_entry(tx_pkt, link);
577 list_del(&tx_pkt->link);
578 if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
579 dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
580 tx_pkt->mem.size,
581 DMA_TO_DEVICE);
582 } else {
583 dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
584 tx_pkt->mem.size,
585 DMA_TO_DEVICE);
586 }
587 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
588 tx_pkt = next_pkt;
589 }
590 if (j < num_desc)
591 /* last desc failed */
592 if (fail_dma_wrap)
593 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
594 if (transfer.iovec_phys) {
595 if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
596 dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
597 transfer.iovec_phys);
598 } else {
599 dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys,
600 size, DMA_TO_DEVICE);
601 kfree(transfer.iovec);
602 }
603 }
604 spin_unlock_bh(&sys->spinlock);
605 return -EFAULT;
606}
607
608/**
609 * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver
610 * after an immediate command is complete.
611 * @user1: pointer to the descriptor of the transfer
612 * @user2:
613 *
614 * Complete the immediate commands completion object, this will release the
615 * thread which waits on this completion object (ipa_send_cmd())
616 */
617static void ipa_sps_irq_cmd_ack(void *user1, int user2)
618{
619 struct ipa_desc *desc = (struct ipa_desc *)user1;
620
621 if (!desc) {
622 IPAERR("desc is NULL\n");
623 WARN_ON(1);
624 return;
625 }
626 IPADBG("got ack for cmd=%d\n", desc->opcode);
627 complete(&desc->xfer_done);
628}
629
630/**
631 * ipa_send_cmd - send immediate commands
632 * @num_desc: number of descriptors within the desc struct
633 * @descr: descriptor structure
634 *
635 * Function will block till command gets ACK from IPA HW, caller needs
636 * to free any resources it allocated after function returns
637 * The callback in ipa_desc should not be set by the caller
638 * for this function.
639 */
640int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
641{
642 struct ipa_desc *desc;
643 int result = 0;
644 struct ipa_sys_context *sys;
645 int ep_idx;
646
647 IPADBG("sending command\n");
648
649 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
650 if (-1 == ep_idx) {
651 IPAERR("Client %u is not mapped\n",
652 IPA_CLIENT_APPS_CMD_PROD);
653 return -EFAULT;
654 }
655 sys = ipa_ctx->ep[ep_idx].sys;
656
657 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
658
659 if (num_desc == 1) {
660 init_completion(&descr->xfer_done);
661
662 if (descr->callback || descr->user1)
663 WARN_ON(1);
664
665 descr->callback = ipa_sps_irq_cmd_ack;
666 descr->user1 = descr;
667 if (ipa_send_one(sys, descr, true)) {
668 IPAERR("fail to send immediate command\n");
669 result = -EFAULT;
670 goto bail;
671 }
672 wait_for_completion(&descr->xfer_done);
673 } else {
674 desc = &descr[num_desc - 1];
675 init_completion(&desc->xfer_done);
676
677 if (desc->callback || desc->user1)
678 WARN_ON(1);
679
680 desc->callback = ipa_sps_irq_cmd_ack;
681 desc->user1 = desc;
682 if (ipa_send(sys, num_desc, descr, true)) {
683 IPAERR("fail to send multiple immediate command set\n");
684 result = -EFAULT;
685 goto bail;
686 }
687 wait_for_completion(&desc->xfer_done);
688 }
689
690bail:
691 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
692 return result;
693}
694
695/**
696 * ipa_sps_irq_tx_notify() - Callback function which will be called by
697 * the SPS driver to start a Tx poll operation.
698 * Called in an interrupt context.
699 * @notify: SPS driver supplied notification struct
700 *
701 * This function defer the work for this event to the tx workqueue.
702 */
703static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
704{
705 struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
706 int ret;
707
708 IPADBG("event %d notified\n", notify->event_id);
709
710 switch (notify->event_id) {
711 case SPS_EVENT_EOT:
712 if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
713 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
714 if (!atomic_read(&sys->curr_polling_state)) {
715 ret = sps_get_config(sys->ep->ep_hdl,
716 &sys->ep->connect);
717 if (ret) {
718 IPAERR("sps_get_config() failed %d\n", ret);
719 break;
720 }
721 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
722 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
723 ret = sps_set_config(sys->ep->ep_hdl,
724 &sys->ep->connect);
725 if (ret) {
726 IPAERR("sps_set_config() failed %d\n", ret);
727 break;
728 }
729 atomic_set(&sys->curr_polling_state, 1);
730 queue_work(sys->wq, &sys->work);
731 }
732 break;
733 default:
734 IPAERR("received unexpected event id %d\n", notify->event_id);
735 }
736}
737
738/**
739 * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
740 * the SPS driver after a Tx operation is complete.
741 * Called in an interrupt context.
742 * @notify: SPS driver supplied notification struct
743 *
744 * This function defer the work for this event to the tx workqueue.
745 * This event will be later handled by ipa_write_done.
746 */
747static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
748{
749 struct ipa_tx_pkt_wrapper *tx_pkt;
750
751 IPADBG("event %d notified\n", notify->event_id);
752
753 switch (notify->event_id) {
754 case SPS_EVENT_EOT:
755 tx_pkt = notify->data.transfer.user;
756 if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
757 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
758 queue_work(tx_pkt->sys->wq, &tx_pkt->work);
759 break;
760 default:
761 IPAERR("received unexpected event id %d\n", notify->event_id);
762 }
763}
764
765/**
766 * ipa_poll_pkt() - Poll packet from SPS BAM
767 * return 0 to caller on poll successfully
768 * else -EIO
769 *
770 */
771static int ipa_poll_pkt(struct ipa_sys_context *sys,
772 struct sps_iovec *iov)
773{
774 int ret;
775
776 ret = sps_get_iovec(sys->ep->ep_hdl, iov);
777 if (ret) {
778 IPAERR("sps_get_iovec failed %d\n", ret);
779 return ret;
780 }
781
782 if (iov->addr == 0)
783 return -EIO;
784
785 return 0;
786}
787
788/**
789 * ipa_handle_rx_core() - The core functionality of packet reception. This
790 * function is read from multiple code paths.
791 *
792 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
793 * endpoint. The function runs as long as there are packets in the pipe.
794 * For each packet:
795 * - Disconnect the packet from the system pipe linked list
796 * - Unmap the packets skb, make it non DMAable
797 * - Free the packet from the cache
798 * - Prepare a proper skb
799 * - Call the endpoints notify function, passing the skb in the parameters
800 * - Replenish the rx cache
801 */
802static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
803 bool in_poll_state)
804{
805 struct sps_iovec iov;
806 int ret;
807 int cnt = 0;
808
809 while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
810 !atomic_read(&sys->curr_polling_state))) {
811 if (cnt && !process_all)
812 break;
813
814 ret = ipa_poll_pkt(sys, &iov);
815 if (ret)
816 break;
817
818 if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
819 ipa_dma_memcpy_notify(sys, &iov);
820 else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
821 ipa_wlan_wq_rx_common(sys, iov.size);
822 else
823 ipa_wq_rx_common(sys, iov.size);
824
825 cnt++;
826 };
827
828 return cnt;
829}
830
831/**
832 * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
833 */
834static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
835{
836 int ret;
837
838 if (!sys->ep || !sys->ep->valid) {
839 IPAERR("EP Not Valid, no need to cleanup.\n");
840 return;
841 }
842
843 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
844 if (ret) {
845 IPAERR("sps_get_config() failed %d\n", ret);
846 goto fail;
847 }
848
849 if (!atomic_read(&sys->curr_polling_state) &&
850 ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
851 IPADBG("already in intr mode\n");
852 return;
853 }
854
855 if (!atomic_read(&sys->curr_polling_state)) {
856 IPAERR("already in intr mode\n");
857 goto fail;
858 }
859
860 sys->event.options = SPS_O_EOT;
861 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
862 if (ret) {
863 IPAERR("sps_register_event() failed %d\n", ret);
864 goto fail;
865 }
866 sys->ep->connect.options =
867 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
868 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
869 if (ret) {
870 IPAERR("sps_set_config() failed %d\n", ret);
871 goto fail;
872 }
873 atomic_set(&sys->curr_polling_state, 0);
874 if (!sys->ep->napi_enabled)
875 ipa_handle_rx_core(sys, true, false);
876 ipa_dec_release_wakelock(sys->ep->wakelock_client);
877 return;
878
879fail:
880 queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
881 msecs_to_jiffies(1));
882}
883
884
885/**
886 * ipa_sps_irq_control() - Function to enable or disable BAM IRQ.
887 */
888static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
889{
890 int ret;
891
892 /*
893 * Do not change sps config in case we are in polling mode as this
894 * indicates that sps driver already notified EOT event and sps config
895 * should not change until ipa driver processes the packet.
896 */
897 if (atomic_read(&sys->curr_polling_state)) {
898 IPADBG("in polling mode, do not change config\n");
899 return;
900 }
901
902 if (enable) {
903 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
904 if (ret) {
905 IPAERR("sps_get_config() failed %d\n", ret);
906 return;
907 }
908 sys->event.options = SPS_O_EOT;
909 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
910 if (ret) {
911 IPAERR("sps_register_event() failed %d\n", ret);
912 return;
913 }
914 sys->ep->connect.options =
915 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
916 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
917 if (ret) {
918 IPAERR("sps_set_config() failed %d\n", ret);
919 return;
920 }
921 } else {
922 ret = sps_get_config(sys->ep->ep_hdl,
923 &sys->ep->connect);
924 if (ret) {
925 IPAERR("sps_get_config() failed %d\n", ret);
926 return;
927 }
928 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
929 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
930 ret = sps_set_config(sys->ep->ep_hdl,
931 &sys->ep->connect);
932 if (ret) {
933 IPAERR("sps_set_config() failed %d\n", ret);
934 return;
935 }
936 }
937}
938
939void ipa_sps_irq_control_all(bool enable)
940{
941 struct ipa_ep_context *ep;
942 int ipa_ep_idx, client_num;
943
944 IPADBG("\n");
945
946 for (client_num = IPA_CLIENT_CONS;
947 client_num < IPA_CLIENT_MAX; client_num++) {
948 if (!IPA_CLIENT_IS_APPS_CONS(client_num))
949 continue;
950
951 ipa_ep_idx = ipa_get_ep_mapping(client_num);
952 if (ipa_ep_idx == -1) {
953 IPAERR("Invalid client.\n");
954 continue;
955 }
956 ep = &ipa_ctx->ep[ipa_ep_idx];
957 if (!ep->valid) {
958 IPAERR("EP (%d) not allocated.\n", ipa_ep_idx);
959 continue;
960 }
961 ipa_sps_irq_control(ep->sys, enable);
962 }
963}
964
965/**
966 * ipa_rx_notify() - Callback function which is called by the SPS driver when a
967 * a packet is received
968 * @notify: SPS driver supplied notification information
969 *
970 * Called in an interrupt context, therefore the majority of the work is
971 * deffered using a work queue.
972 *
973 * After receiving a packet, the driver goes to polling mode and keeps pulling
974 * packets until the rx buffer is empty, then it goes back to interrupt mode.
975 * This comes to prevent the CPU from handling too many interrupts when the
976 * throughput is high.
977 */
978static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
979{
980 struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
981 int ret;
982
983 IPADBG("event %d notified\n", notify->event_id);
984
985 switch (notify->event_id) {
986 case SPS_EVENT_EOT:
987 if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
988 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
989
990 if (atomic_read(&sys->curr_polling_state)) {
991 sys->ep->eot_in_poll_err++;
992 break;
993 }
994
995 ret = sps_get_config(sys->ep->ep_hdl,
996 &sys->ep->connect);
997 if (ret) {
998 IPAERR("sps_get_config() failed %d\n", ret);
999 break;
1000 }
1001 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
1002 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
1003 ret = sps_set_config(sys->ep->ep_hdl,
1004 &sys->ep->connect);
1005 if (ret) {
1006 IPAERR("sps_set_config() failed %d\n", ret);
1007 break;
1008 }
1009 ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
1010 atomic_set(&sys->curr_polling_state, 1);
1011 trace_intr_to_poll(sys->ep->client);
1012 queue_work(sys->wq, &sys->work);
1013 break;
1014 default:
1015 IPAERR("received unexpected event id %d\n", notify->event_id);
1016 }
1017}
1018
1019static void switch_to_intr_tx_work_func(struct work_struct *work)
1020{
1021 struct delayed_work *dwork;
1022 struct ipa_sys_context *sys;
1023
1024 dwork = container_of(work, struct delayed_work, work);
1025 sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
1026 ipa_handle_tx(sys);
1027}
1028
1029/**
1030 * ipa_handle_rx() - handle packet reception. This function is executed in the
1031 * context of a work queue.
1032 * @work: work struct needed by the work queue
1033 *
1034 * ipa_handle_rx_core() is run in polling mode. After all packets has been
1035 * received, the driver switches back to interrupt mode.
1036 */
1037static void ipa_handle_rx(struct ipa_sys_context *sys)
1038{
1039 int inactive_cycles = 0;
1040 int cnt;
1041
1042 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1043 do {
1044 cnt = ipa_handle_rx_core(sys, true, true);
1045 if (cnt == 0) {
1046 inactive_cycles++;
1047 trace_idle_sleep_enter(sys->ep->client);
1048 usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
1049 ipa_ctx->ipa_rx_max_timeout_usec);
1050 trace_idle_sleep_exit(sys->ep->client);
1051 } else {
1052 inactive_cycles = 0;
1053 }
1054
1055 /* if pipe is out of buffers there is no point polling for
1056 * completed descs; release the worker so delayed work can
1057 * run in a timely manner
1058 */
1059 if (sys->len == 0)
1060 break;
1061
1062 } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);
1063
1064 trace_poll_to_intr(sys->ep->client);
1065 ipa_rx_switch_to_intr_mode(sys);
1066 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1067}
1068
1069/**
1070 * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
1071 * function is exectued in the softirq context
1072 *
1073 * if input budget is zero, the driver switches back to
1074 * interrupt mode
1075 *
1076 * return number of polled packets, on error 0(zero)
1077 */
1078int ipa2_rx_poll(u32 clnt_hdl, int weight)
1079{
1080 struct ipa_ep_context *ep;
1081 int ret;
1082 int cnt = 0;
1083 unsigned int delay = 1;
1084 struct sps_iovec iov;
1085
1086 IPADBG("\n");
1087 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
1088 ipa_ctx->ep[clnt_hdl].valid == 0) {
1089 IPAERR("bad parm 0x%x\n", clnt_hdl);
1090 return cnt;
1091 }
1092
1093 ep = &ipa_ctx->ep[clnt_hdl];
1094 while (cnt < weight &&
1095 atomic_read(&ep->sys->curr_polling_state)) {
1096
1097 ret = ipa_poll_pkt(ep->sys, &iov);
1098 if (ret)
1099 break;
1100
1101 ipa_wq_rx_common(ep->sys, iov.size);
1102 cnt += 5;
1103 };
1104
1105 if (cnt == 0) {
1106 ep->inactive_cycles++;
1107 ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
1108
1109 if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
1110 ep->switch_to_intr = true;
1111 delay = 0;
1112 }
1113 queue_delayed_work(ep->sys->wq,
1114 &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
1115 } else
1116 ep->inactive_cycles = 0;
1117
1118 return cnt;
1119}
1120
1121static void switch_to_intr_rx_work_func(struct work_struct *work)
1122{
1123 struct delayed_work *dwork;
1124 struct ipa_sys_context *sys;
1125
1126 dwork = container_of(work, struct delayed_work, work);
1127 sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
1128
1129 if (sys->ep->napi_enabled) {
1130 if (sys->ep->switch_to_intr) {
1131 ipa_rx_switch_to_intr_mode(sys);
1132 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
1133 sys->ep->switch_to_intr = false;
1134 sys->ep->inactive_cycles = 0;
1135 } else
1136 sys->ep->client_notify(sys->ep->priv,
1137 IPA_CLIENT_START_POLL, 0);
1138 } else
1139 ipa_handle_rx(sys);
1140}
1141
1142/**
1143 * ipa_update_repl_threshold()- Update the repl_threshold for the client.
1144 *
1145 * Return value: None.
1146 */
1147void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
1148{
1149 int ep_idx;
1150 struct ipa_ep_context *ep;
1151
1152 /* Check if ep is valid. */
1153 ep_idx = ipa2_get_ep_mapping(ipa_client);
1154 if (ep_idx == -1) {
1155 IPADBG("Invalid IPA client\n");
1156 return;
1157 }
1158
1159 ep = &ipa_ctx->ep[ep_idx];
1160 if (!ep->valid) {
1161 IPADBG("EP not valid/Not applicable for client.\n");
1162 return;
1163 }
1164 /*
1165 * Determine how many buffers/descriptors remaining will
1166 * cause to drop below the yellow WM bar.
1167 */
Skylar Chang50b21692016-11-01 16:48:30 -07001168 if (ep->sys->rx_buff_sz)
1169 ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
1170 / ep->sys->rx_buff_sz;
1171 else
1172 ep->rx_replenish_threshold = 0;
Amir Levy9659e592016-10-27 18:08:27 +03001173}
1174
1175/**
1176 * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
1177 * IPA EP configuration
1178 * @sys_in: [in] input needed to setup BAM pipe and configure EP
1179 * @clnt_hdl: [out] client handle
1180 *
1181 * - configure the end-point registers with the supplied
1182 * parameters from the user.
1183 * - call SPS APIs to create a system-to-bam connection with IPA.
1184 * - allocate descriptor FIFO
1185 * - register callback function(ipa_sps_irq_rx_notify or
1186 * ipa_sps_irq_tx_notify - depends on client type) in case the driver is
1187 * not configured to pulling mode
1188 *
1189 * Returns: 0 on success, negative on failure
1190 */
1191int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
1192{
1193 struct ipa_ep_context *ep;
1194 int ipa_ep_idx;
1195 int result = -EINVAL;
1196 dma_addr_t dma_addr;
1197 char buff[IPA_RESOURCE_NAME_MAX];
1198 struct iommu_domain *smmu_domain;
1199
1200 if (unlikely(!ipa_ctx)) {
1201 IPAERR("IPA driver was not initialized\n");
1202 return -EINVAL;
1203 }
1204
1205 if (sys_in == NULL || clnt_hdl == NULL) {
1206 IPAERR("NULL args\n");
1207 goto fail_gen;
1208 }
1209
1210 if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
1211 IPAERR("bad parm client:%d fifo_sz:%d\n",
1212 sys_in->client, sys_in->desc_fifo_sz);
1213 goto fail_gen;
1214 }
1215
1216 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
1217 if (ipa_ep_idx == -1) {
1218 IPAERR("Invalid client.\n");
1219 goto fail_gen;
1220 }
1221
1222 ep = &ipa_ctx->ep[ipa_ep_idx];
1223
1224 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
1225
1226 if (ep->valid == 1) {
1227 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
1228 IPAERR("EP already allocated.\n");
1229 goto fail_and_disable_clocks;
1230 } else {
1231 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
1232 &sys_in->ipa_ep_cfg.hdr)) {
1233 IPAERR("fail to configure hdr prop of EP.\n");
1234 result = -EFAULT;
1235 goto fail_and_disable_clocks;
1236 }
1237 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
1238 &sys_in->ipa_ep_cfg.cfg)) {
1239 IPAERR("fail to configure cfg prop of EP.\n");
1240 result = -EFAULT;
1241 goto fail_and_disable_clocks;
1242 }
1243 IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
1244 sys_in->client, ipa_ep_idx, ep->sys);
1245 ep->client_notify = sys_in->notify;
1246 ep->priv = sys_in->priv;
1247 *clnt_hdl = ipa_ep_idx;
1248 if (!ep->keep_ipa_awake)
1249 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1250
1251 return 0;
1252 }
1253 }
1254
1255 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
1256
1257 if (!ep->sys) {
1258 ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL);
1259 if (!ep->sys) {
1260 IPAERR("failed to sys ctx for client %d\n",
1261 sys_in->client);
1262 result = -ENOMEM;
1263 goto fail_and_disable_clocks;
1264 }
1265
1266 ep->sys->ep = ep;
1267 snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
1268 sys_in->client);
1269 ep->sys->wq = alloc_workqueue(buff,
1270 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
1271 if (!ep->sys->wq) {
1272 IPAERR("failed to create wq for client %d\n",
1273 sys_in->client);
1274 result = -EFAULT;
1275 goto fail_wq;
1276 }
1277
1278 snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
1279 sys_in->client);
1280 ep->sys->repl_wq = alloc_workqueue(buff,
1281 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
1282 if (!ep->sys->repl_wq) {
1283 IPAERR("failed to create rep wq for client %d\n",
1284 sys_in->client);
1285 result = -EFAULT;
1286 goto fail_wq2;
1287 }
1288
1289 INIT_LIST_HEAD(&ep->sys->head_desc_list);
1290 INIT_LIST_HEAD(&ep->sys->rcycl_list);
1291 spin_lock_init(&ep->sys->spinlock);
1292 } else {
1293 memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
1294 }
1295
1296 ep->skip_ep_cfg = sys_in->skip_ep_cfg;
1297 if (ipa_assign_policy(sys_in, ep->sys)) {
1298 IPAERR("failed to sys ctx for client %d\n", sys_in->client);
1299 result = -ENOMEM;
1300 goto fail_gen2;
1301 }
1302
1303 ep->valid = 1;
1304 ep->client = sys_in->client;
1305 ep->client_notify = sys_in->notify;
1306 ep->napi_enabled = sys_in->napi_enabled;
1307 ep->priv = sys_in->priv;
1308 ep->keep_ipa_awake = sys_in->keep_ipa_awake;
1309 atomic_set(&ep->avail_fifo_desc,
1310 ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
1311
1312 if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
1313 ep->sys->status_stat == NULL) {
1314 ep->sys->status_stat =
1315 kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL);
1316 if (!ep->sys->status_stat) {
1317 IPAERR("no memory\n");
1318 goto fail_gen2;
1319 }
1320 }
1321
1322 result = ipa_enable_data_path(ipa_ep_idx);
1323 if (result) {
1324 IPAERR("enable data path failed res=%d clnt=%d.\n", result,
1325 ipa_ep_idx);
1326 goto fail_gen2;
1327 }
1328
1329 if (!ep->skip_ep_cfg) {
1330 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
1331 IPAERR("fail to configure EP.\n");
1332 goto fail_gen2;
1333 }
1334 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
1335 IPAERR("fail to configure status of EP.\n");
1336 goto fail_gen2;
1337 }
1338 IPADBG("ep configuration successful\n");
1339 } else {
1340 IPADBG("skipping ep configuration\n");
1341 }
1342
1343 /* Default Config */
1344 ep->ep_hdl = sps_alloc_endpoint();
1345 if (ep->ep_hdl == NULL) {
1346 IPAERR("SPS EP allocation failed.\n");
1347 goto fail_gen2;
1348 }
1349
1350 result = sps_get_config(ep->ep_hdl, &ep->connect);
1351 if (result) {
1352 IPAERR("fail to get config.\n");
1353 goto fail_sps_cfg;
1354 }
1355
1356 /* Specific Config */
1357 if (IPA_CLIENT_IS_CONS(sys_in->client)) {
1358 ep->connect.mode = SPS_MODE_SRC;
1359 ep->connect.destination = SPS_DEV_HANDLE_MEM;
1360 ep->connect.source = ipa_ctx->bam_handle;
1361 ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++;
1362 ep->connect.src_pipe_index = ipa_ep_idx;
1363 /*
1364 * Determine how many buffers/descriptors remaining will
1365 * cause to drop below the yellow WM bar.
1366 */
Skylar Chang50b21692016-11-01 16:48:30 -07001367 if (ep->sys->rx_buff_sz)
1368 ep->rx_replenish_threshold =
1369 ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz;
1370 else
1371 ep->rx_replenish_threshold = 0;
Amir Levy9659e592016-10-27 18:08:27 +03001372 /* Only when the WAN pipes are setup, actual threshold will
1373 * be read from the register. So update LAN_CONS ep again with
1374 * right value.
1375 */
1376 if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS)
1377 ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS);
1378 } else {
1379 ep->connect.mode = SPS_MODE_DEST;
1380 ep->connect.source = SPS_DEV_HANDLE_MEM;
1381 ep->connect.destination = ipa_ctx->bam_handle;
1382 ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++;
1383 ep->connect.dest_pipe_index = ipa_ep_idx;
1384 }
1385
1386 IPADBG("client:%d ep:%d",
1387 sys_in->client, ipa_ep_idx);
1388
1389 IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
1390 ep->connect.dest_pipe_index,
1391 ep->connect.src_pipe_index);
1392
1393 ep->connect.options = ep->sys->sps_option;
1394 ep->connect.desc.size = sys_in->desc_fifo_sz;
1395 ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
1396 ep->connect.desc.size, &dma_addr, GFP_KERNEL);
1397 if (ipa_ctx->smmu_s1_bypass) {
1398 ep->connect.desc.phys_base = dma_addr;
1399 } else {
1400 ep->connect.desc.iova = dma_addr;
1401 smmu_domain = ipa2_get_smmu_domain();
1402 if (smmu_domain != NULL) {
1403 ep->connect.desc.phys_base =
1404 iommu_iova_to_phys(smmu_domain, dma_addr);
1405 }
1406 }
1407 if (ep->connect.desc.base == NULL) {
1408 IPAERR("fail to get DMA desc memory.\n");
1409 goto fail_sps_cfg;
1410 }
1411
1412 ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
1413
1414 result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client);
1415 if (result) {
1416 IPAERR("sps_connect fails.\n");
1417 goto fail_sps_connect;
1418 }
1419
1420 ep->sys->event.options = SPS_O_EOT;
1421 ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
1422 ep->sys->event.xfer_done = NULL;
1423 ep->sys->event.user = ep->sys;
1424 ep->sys->event.callback = ep->sys->sps_callback;
1425 result = sps_register_event(ep->ep_hdl, &ep->sys->event);
1426 if (result < 0) {
1427 IPAERR("register event error %d\n", result);
1428 goto fail_register_event;
1429 }
1430
1431 *clnt_hdl = ipa_ep_idx;
1432
1433 if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
1434 ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
1435 ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
1436 sizeof(void *), GFP_KERNEL);
1437 if (!ep->sys->repl.cache) {
1438 IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
1439 ep->sys->repl_hdlr = ipa_replenish_rx_cache;
1440 ep->sys->repl.capacity = 0;
1441 } else {
1442 atomic_set(&ep->sys->repl.head_idx, 0);
1443 atomic_set(&ep->sys->repl.tail_idx, 0);
1444 ipa_wq_repl_rx(&ep->sys->repl_work);
1445 }
1446 }
1447
1448 if (IPA_CLIENT_IS_CONS(sys_in->client))
1449 ipa_replenish_rx_cache(ep->sys);
1450
1451 if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
1452 ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
1453 atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
1454 }
1455
1456 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
1457 if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
1458 if (ipa_ctx->modem_cfg_emb_pipe_flt &&
1459 sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
1460 IPADBG("modem cfg emb pipe flt\n");
1461 else
1462 ipa_install_dflt_flt_rules(ipa_ep_idx);
1463 }
1464
1465 if (!ep->keep_ipa_awake)
1466 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1467
1468 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
1469 ipa_ep_idx, ep->sys);
1470
1471 return 0;
1472
1473fail_register_event:
1474 sps_disconnect(ep->ep_hdl);
1475fail_sps_connect:
1476 dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
1477 ep->connect.desc.base,
1478 ep->connect.desc.phys_base);
1479fail_sps_cfg:
1480 sps_free_endpoint(ep->ep_hdl);
1481fail_gen2:
1482 destroy_workqueue(ep->sys->repl_wq);
1483fail_wq2:
1484 destroy_workqueue(ep->sys->wq);
1485fail_wq:
1486 kfree(ep->sys);
1487 memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
1488fail_and_disable_clocks:
1489 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1490fail_gen:
1491 return result;
1492}
1493
1494/**
1495 * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
1496 * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe
1497 *
1498 * Returns: 0 on success, negative on failure
1499 */
1500int ipa2_teardown_sys_pipe(u32 clnt_hdl)
1501{
1502 struct ipa_ep_context *ep;
1503 int empty;
1504
1505 if (unlikely(!ipa_ctx)) {
1506 IPAERR("IPA driver was not initialized\n");
1507 return -EINVAL;
1508 }
1509
1510 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
1511 ipa_ctx->ep[clnt_hdl].valid == 0) {
1512 IPAERR("bad parm.\n");
1513 return -EINVAL;
1514 }
1515
1516 ep = &ipa_ctx->ep[clnt_hdl];
1517
1518 if (!ep->keep_ipa_awake)
1519 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
1520
1521 ipa_disable_data_path(clnt_hdl);
1522 if (ep->napi_enabled) {
1523 ep->switch_to_intr = true;
1524 do {
1525 usleep_range(95, 105);
1526 } while (atomic_read(&ep->sys->curr_polling_state));
1527 }
1528
1529 if (IPA_CLIENT_IS_PROD(ep->client)) {
1530 do {
1531 spin_lock_bh(&ep->sys->spinlock);
1532 empty = list_empty(&ep->sys->head_desc_list);
1533 spin_unlock_bh(&ep->sys->spinlock);
1534 if (!empty)
1535 usleep_range(95, 105);
1536 else
1537 break;
1538 } while (1);
1539 }
1540
1541 if (IPA_CLIENT_IS_CONS(ep->client)) {
1542 cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
1543 cancel_delayed_work_sync(&ep->sys->switch_to_intr_work);
1544 }
1545
1546 flush_workqueue(ep->sys->wq);
1547 sps_disconnect(ep->ep_hdl);
1548 dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
1549 ep->connect.desc.base,
1550 ep->connect.desc.phys_base);
1551 sps_free_endpoint(ep->ep_hdl);
1552 if (ep->sys->repl_wq)
1553 flush_workqueue(ep->sys->repl_wq);
1554 if (IPA_CLIENT_IS_CONS(ep->client))
1555 ipa_cleanup_rx(ep->sys);
1556
1557 if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
1558 if (ipa_ctx->modem_cfg_emb_pipe_flt &&
1559 ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
1560 IPADBG("modem cfg emb pipe flt\n");
1561 else
1562 ipa_delete_dflt_flt_rules(clnt_hdl);
1563 }
1564
1565 if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
1566 atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt);
1567
1568 memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats));
1569
1570 if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
1571 ipa_cleanup_wlan_rx_common_cache();
1572
1573 ep->valid = 0;
1574 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
1575
1576 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
1577
1578 return 0;
1579}
1580
1581/**
1582 * ipa_tx_comp_usr_notify_release() - Callback function which will call the
1583 * user supplied callback function to release the skb, or release it on
1584 * its own if no callback function was supplied.
1585 * @user1
1586 * @user2
1587 *
1588 * This notified callback is for the destination client.
1589 * This function is supplied in ipa_connect.
1590 */
1591static void ipa_tx_comp_usr_notify_release(void *user1, int user2)
1592{
1593 struct sk_buff *skb = (struct sk_buff *)user1;
1594 int ep_idx = user2;
1595
1596 IPADBG("skb=%p ep=%d\n", skb, ep_idx);
1597
1598 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl);
1599
1600 if (ipa_ctx->ep[ep_idx].client_notify)
1601 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
1602 IPA_WRITE_DONE, (unsigned long)skb);
1603 else
1604 dev_kfree_skb_any(skb);
1605}
1606
1607static void ipa_tx_cmd_comp(void *user1, int user2)
1608{
1609 kfree(user1);
1610}
1611
1612/**
1613 * ipa2_tx_dp() - Data-path tx handler
1614 * @dst: [in] which IPA destination to route tx packets to
1615 * @skb: [in] the packet to send
1616 * @metadata: [in] TX packet meta-data
1617 *
1618 * Data-path tx handler, this is used for both SW data-path which by-passes most
1619 * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
1620 * dst is a "valid" CONS type, then SW data-path is used. If dst is the
1621 * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
1622 * is an error. For errors, client needs to free the skb as needed. For success,
1623 * IPA driver will later invoke client callback if one was supplied. That
1624 * callback should free the skb. If no callback supplied, IPA driver will free
1625 * the skb internally
1626 *
1627 * The function will use two descriptors for this send command
1628 * (for A5_WLAN_AMPDU_PROD only one desciprtor will be sent),
1629 * the first descriptor will be used to inform the IPA hardware that
1630 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
1631 * Once this send was done from SPS point-of-view the IPA driver will
1632 * get notified by the supplied callback - ipa_sps_irq_tx_comp()
1633 *
1634 * ipa_sps_irq_tx_comp will call to the user supplied
1635 * callback (from ipa_connect)
1636 *
1637 * Returns: 0 on success, negative on failure
1638 */
1639int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
1640 struct ipa_tx_meta *meta)
1641{
1642 struct ipa_desc *desc;
1643 struct ipa_desc _desc[2];
1644 int dst_ep_idx;
1645 struct ipa_ip_packet_init *cmd;
1646 struct ipa_sys_context *sys;
1647 int src_ep_idx;
1648 int num_frags, f;
1649
1650 if (unlikely(!ipa_ctx)) {
1651 IPAERR("IPA driver was not initialized\n");
1652 return -EINVAL;
1653 }
1654
1655 if (skb->len == 0) {
1656 IPAERR("packet size is 0\n");
1657 return -EINVAL;
1658 }
1659
1660 num_frags = skb_shinfo(skb)->nr_frags;
1661 if (num_frags) {
1662 /* 1 desc is needed for the linear portion of skb;
1663 * 1 desc may be needed for the PACKET_INIT;
1664 * 1 desc for each frag
1665 */
1666 desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC);
1667 if (!desc) {
1668 IPAERR("failed to alloc desc array\n");
1669 goto fail_mem;
1670 }
1671 } else {
1672 memset(_desc, 0, 2 * sizeof(struct ipa_desc));
1673 desc = &_desc[0];
1674 }
1675
1676 /*
1677 * USB_CONS: PKT_INIT ep_idx = dst pipe
1678 * Q6_CONS: PKT_INIT ep_idx = sender pipe
1679 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
1680 *
1681 * LAN TX: all PKT_INIT
1682 * WAN TX: PKT_INIT (cmd) + HW (data)
1683 *
1684 */
1685 if (IPA_CLIENT_IS_CONS(dst)) {
1686 src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1687 if (-1 == src_ep_idx) {
1688 IPAERR("Client %u is not mapped\n",
1689 IPA_CLIENT_APPS_LAN_WAN_PROD);
1690 goto fail_gen;
1691 }
1692 dst_ep_idx = ipa2_get_ep_mapping(dst);
1693 } else {
1694 src_ep_idx = ipa2_get_ep_mapping(dst);
1695 if (-1 == src_ep_idx) {
1696 IPAERR("Client %u is not mapped\n", dst);
1697 goto fail_gen;
1698 }
1699 if (meta && meta->pkt_init_dst_ep_valid)
1700 dst_ep_idx = meta->pkt_init_dst_ep;
1701 else
1702 dst_ep_idx = -1;
1703 }
1704
1705 sys = ipa_ctx->ep[src_ep_idx].sys;
1706
1707 if (!sys->ep->valid) {
1708 IPAERR("pipe not valid\n");
1709 goto fail_gen;
1710 }
1711
1712 if (dst_ep_idx != -1) {
1713 /* SW data path */
1714 cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
1715 if (!cmd) {
1716 IPAERR("failed to alloc immediate command object\n");
1717 goto fail_gen;
1718 }
1719
1720 cmd->destination_pipe_index = dst_ep_idx;
1721 desc[0].opcode = IPA_IP_PACKET_INIT;
1722 desc[0].pyld = cmd;
1723 desc[0].len = sizeof(struct ipa_ip_packet_init);
1724 desc[0].type = IPA_IMM_CMD_DESC;
1725 desc[0].callback = ipa_tx_cmd_comp;
1726 desc[0].user1 = cmd;
1727 desc[1].pyld = skb->data;
1728 desc[1].len = skb_headlen(skb);
1729 desc[1].type = IPA_DATA_DESC_SKB;
1730 desc[1].callback = ipa_tx_comp_usr_notify_release;
1731 desc[1].user1 = skb;
1732 desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid &&
1733 meta->pkt_init_dst_ep_remote) ?
1734 src_ep_idx :
1735 dst_ep_idx;
1736 if (meta && meta->dma_address_valid) {
1737 desc[1].dma_address_valid = true;
1738 desc[1].dma_address = meta->dma_address;
1739 }
1740
1741 for (f = 0; f < num_frags; f++) {
1742 desc[2+f].frag = &skb_shinfo(skb)->frags[f];
1743 desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
1744 desc[2+f].len = skb_frag_size(desc[2+f].frag);
1745 }
1746
1747 /* don't free skb till frag mappings are released */
1748 if (num_frags) {
1749 desc[2+f-1].callback = desc[1].callback;
1750 desc[2+f-1].user1 = desc[1].user1;
1751 desc[2+f-1].user2 = desc[1].user2;
1752 desc[1].callback = NULL;
1753 }
1754
1755 if (ipa_send(sys, num_frags + 2, desc, true)) {
1756 IPAERR("fail to send skb %p num_frags %u SWP\n",
1757 skb, num_frags);
1758 goto fail_send;
1759 }
1760 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
1761 } else {
1762 /* HW data path */
1763 desc[0].pyld = skb->data;
1764 desc[0].len = skb_headlen(skb);
1765 desc[0].type = IPA_DATA_DESC_SKB;
1766 desc[0].callback = ipa_tx_comp_usr_notify_release;
1767 desc[0].user1 = skb;
1768 desc[0].user2 = src_ep_idx;
1769
1770 if (meta && meta->dma_address_valid) {
1771 desc[0].dma_address_valid = true;
1772 desc[0].dma_address = meta->dma_address;
1773 }
1774
1775 if (num_frags == 0) {
1776 if (ipa_send_one(sys, desc, true)) {
1777 IPAERR("fail to send skb %p HWP\n", skb);
1778 goto fail_gen;
1779 }
1780 } else {
1781 for (f = 0; f < num_frags; f++) {
1782 desc[1+f].frag = &skb_shinfo(skb)->frags[f];
1783 desc[1+f].type = IPA_DATA_DESC_SKB_PAGED;
1784 desc[1+f].len = skb_frag_size(desc[1+f].frag);
1785 }
1786
1787 /* don't free skb till frag mappings are released */
1788 desc[1+f-1].callback = desc[0].callback;
1789 desc[1+f-1].user1 = desc[0].user1;
1790 desc[1+f-1].user2 = desc[0].user2;
1791 desc[0].callback = NULL;
1792
1793 if (ipa_send(sys, num_frags + 1, desc, true)) {
1794 IPAERR("fail to send skb %p num_frags %u HWP\n",
1795 skb, num_frags);
1796 goto fail_gen;
1797 }
1798 }
1799
1800 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
1801 }
1802
1803 if (num_frags) {
1804 kfree(desc);
1805 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
1806 }
1807
1808 return 0;
1809
1810fail_send:
1811 kfree(cmd);
1812fail_gen:
1813 if (num_frags)
1814 kfree(desc);
1815fail_mem:
1816 return -EFAULT;
1817}
1818
1819static void ipa_wq_handle_rx(struct work_struct *work)
1820{
1821 struct ipa_sys_context *sys;
1822
1823 sys = container_of(work, struct ipa_sys_context, work);
1824
1825 if (sys->ep->napi_enabled) {
1826 IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
1827 sys->ep->client_notify(sys->ep->priv,
1828 IPA_CLIENT_START_POLL, 0);
1829 } else
1830 ipa_handle_rx(sys);
1831}
1832
1833static void ipa_wq_repl_rx(struct work_struct *work)
1834{
1835 struct ipa_sys_context *sys;
1836 void *ptr;
1837 struct ipa_rx_pkt_wrapper *rx_pkt;
1838 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
1839 u32 next;
1840 u32 curr;
1841
1842 sys = container_of(work, struct ipa_sys_context, repl_work);
1843 curr = atomic_read(&sys->repl.tail_idx);
1844
1845begin:
1846 while (1) {
1847 next = (curr + 1) % sys->repl.capacity;
1848 if (next == atomic_read(&sys->repl.head_idx))
1849 goto fail_kmem_cache_alloc;
1850
1851 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
1852 flag);
1853 if (!rx_pkt) {
1854 pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
1855 __func__, sys);
1856 goto fail_kmem_cache_alloc;
1857 }
1858
1859 INIT_LIST_HEAD(&rx_pkt->link);
1860 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
1861 rx_pkt->sys = sys;
1862
1863 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
1864 if (rx_pkt->data.skb == NULL) {
1865 pr_err_ratelimited("%s fail alloc skb sys=%p\n",
1866 __func__, sys);
1867 goto fail_skb_alloc;
1868 }
1869 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
1870 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
1871 sys->rx_buff_sz,
1872 DMA_FROM_DEVICE);
1873 if (rx_pkt->data.dma_addr == 0 ||
1874 rx_pkt->data.dma_addr == ~0) {
1875 pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
1876 __func__, (void *)rx_pkt->data.dma_addr,
1877 ptr, sys);
1878 goto fail_dma_mapping;
1879 }
1880
1881 sys->repl.cache[curr] = rx_pkt;
1882 curr = next;
1883 /* ensure write is done before setting tail index */
1884 mb();
1885 atomic_set(&sys->repl.tail_idx, next);
1886 }
1887
1888 return;
1889
1890fail_dma_mapping:
1891 sys->free_skb(rx_pkt->data.skb);
1892fail_skb_alloc:
1893 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1894fail_kmem_cache_alloc:
1895 if (atomic_read(&sys->repl.tail_idx) ==
1896 atomic_read(&sys->repl.head_idx)) {
1897 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
1898 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty);
1899 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
1900 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty);
1901 else
1902 WARN_ON(1);
1903 pr_err_ratelimited("%s sys=%p repl ring empty\n",
1904 __func__, sys);
1905 goto begin;
1906 }
1907}
1908
1909static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys)
1910{
1911 struct ipa_rx_pkt_wrapper *rx_pkt = NULL;
1912 struct ipa_rx_pkt_wrapper *tmp;
1913 int ret;
1914 u32 rx_len_cached = 0;
1915
1916 IPADBG("\n");
1917
1918 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1919 rx_len_cached = sys->len;
1920
1921 if (rx_len_cached < sys->rx_pool_sz) {
1922 list_for_each_entry_safe(rx_pkt, tmp,
1923 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1924 list_del(&rx_pkt->link);
1925
1926 if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0)
1927 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1928
1929 INIT_LIST_HEAD(&rx_pkt->link);
1930 rx_pkt->len = 0;
1931 rx_pkt->sys = sys;
1932
1933 ret = sps_transfer_one(sys->ep->ep_hdl,
1934 rx_pkt->data.dma_addr,
1935 IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
1936
1937 if (ret) {
1938 IPAERR("sps_transfer_one failed %d\n", ret);
1939 goto fail_sps_transfer;
1940 }
1941
1942 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
1943 rx_len_cached = ++sys->len;
1944
1945 if (rx_len_cached >= sys->rx_pool_sz) {
1946 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1947 return;
1948 }
1949 }
1950 }
1951 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1952
1953 if (rx_len_cached < sys->rx_pool_sz &&
1954 ipa_ctx->wc_memb.wlan_comm_total_cnt <
1955 IPA_WLAN_COMM_RX_POOL_HIGH) {
1956 ipa_replenish_rx_cache(sys);
1957 ipa_ctx->wc_memb.wlan_comm_total_cnt +=
1958 (sys->rx_pool_sz - rx_len_cached);
1959 }
1960
1961 return;
1962
1963fail_sps_transfer:
1964 list_del(&rx_pkt->link);
1965 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1966}
1967
1968static void ipa_cleanup_wlan_rx_common_cache(void)
1969{
1970 struct ipa_rx_pkt_wrapper *rx_pkt;
1971 struct ipa_rx_pkt_wrapper *tmp;
1972
1973 list_for_each_entry_safe(rx_pkt, tmp,
1974 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1975 list_del(&rx_pkt->link);
1976 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
1977 IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE);
1978 dev_kfree_skb_any(rx_pkt->data.skb);
1979 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1980 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1981 ipa_ctx->wc_memb.wlan_comm_total_cnt--;
1982 }
1983 ipa_ctx->wc_memb.total_tx_pkts_freed = 0;
1984
1985 if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0)
1986 IPAERR("wlan comm buff free cnt: %d\n",
1987 ipa_ctx->wc_memb.wlan_comm_free_cnt);
1988
1989 if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0)
1990 IPAERR("wlan comm buff total cnt: %d\n",
1991 ipa_ctx->wc_memb.wlan_comm_total_cnt);
1992
1993}
1994
1995static void ipa_alloc_wlan_rx_common_cache(u32 size)
1996{
1997 void *ptr;
1998 struct ipa_rx_pkt_wrapper *rx_pkt;
1999 int rx_len_cached = 0;
2000 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2001 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2002
2003 rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt;
2004 while (rx_len_cached < size) {
2005 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2006 flag);
2007 if (!rx_pkt) {
2008 IPAERR("failed to alloc rx wrapper\n");
2009 goto fail_kmem_cache_alloc;
2010 }
2011
2012 INIT_LIST_HEAD(&rx_pkt->link);
2013 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2014
2015 rx_pkt->data.skb =
2016 ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
2017 flag);
2018 if (rx_pkt->data.skb == NULL) {
2019 IPAERR("failed to alloc skb\n");
2020 goto fail_skb_alloc;
2021 }
2022 ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
2023 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2024 IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
2025 if (rx_pkt->data.dma_addr == 0 ||
2026 rx_pkt->data.dma_addr == ~0) {
2027 IPAERR("dma_map_single failure %p for %p\n",
2028 (void *)rx_pkt->data.dma_addr, ptr);
2029 goto fail_dma_mapping;
2030 }
2031
2032 list_add_tail(&rx_pkt->link,
2033 &ipa_ctx->wc_memb.wlan_comm_desc_list);
2034 rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
2035
2036 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
2037
2038 }
2039
2040 return;
2041
2042fail_dma_mapping:
2043 dev_kfree_skb_any(rx_pkt->data.skb);
2044fail_skb_alloc:
2045 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2046fail_kmem_cache_alloc:
2047 return;
2048}
2049
2050
2051/**
2052 * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
2053 *
2054 * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
2055 * are IPA_RX_POOL_CEIL buffers in the cache.
2056 * - Allocate a buffer in the cache
2057 * - Initialized the packets link
2058 * - Initialize the packets work struct
2059 * - Allocate the packets socket buffer (skb)
2060 * - Fill the packets skb with data
2061 * - Make the packet DMAable
2062 * - Add the packet to the system pipe linked list
2063 * - Initiate a SPS transfer so that SPS driver will use this packet later.
2064 */
2065static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
2066{
2067 void *ptr;
2068 struct ipa_rx_pkt_wrapper *rx_pkt;
2069 int ret;
2070 int rx_len_cached = 0;
2071 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2072 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2073
2074 rx_len_cached = sys->len;
2075
2076 while (rx_len_cached < sys->rx_pool_sz) {
2077 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2078 flag);
2079 if (!rx_pkt) {
2080 IPAERR("failed to alloc rx wrapper\n");
2081 goto fail_kmem_cache_alloc;
2082 }
2083
2084 INIT_LIST_HEAD(&rx_pkt->link);
2085 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2086 rx_pkt->sys = sys;
2087
2088 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
2089 if (rx_pkt->data.skb == NULL) {
2090 IPAERR("failed to alloc skb\n");
2091 goto fail_skb_alloc;
2092 }
2093 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2094 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2095 sys->rx_buff_sz,
2096 DMA_FROM_DEVICE);
2097 if (rx_pkt->data.dma_addr == 0 ||
2098 rx_pkt->data.dma_addr == ~0) {
2099 IPAERR("dma_map_single failure %p for %p\n",
2100 (void *)rx_pkt->data.dma_addr, ptr);
2101 goto fail_dma_mapping;
2102 }
2103
2104 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2105 rx_len_cached = ++sys->len;
2106
2107 ret = sps_transfer_one(sys->ep->ep_hdl,
2108 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2109
2110 if (ret) {
2111 IPAERR("sps_transfer_one failed %d\n", ret);
2112 goto fail_sps_transfer;
2113 }
2114 }
2115
2116 return;
2117
2118fail_sps_transfer:
2119 list_del(&rx_pkt->link);
2120 rx_len_cached = --sys->len;
2121 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2122 sys->rx_buff_sz, DMA_FROM_DEVICE);
2123fail_dma_mapping:
2124 sys->free_skb(rx_pkt->data.skb);
2125fail_skb_alloc:
2126 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2127fail_kmem_cache_alloc:
2128 if (rx_len_cached == 0)
2129 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2130 msecs_to_jiffies(1));
2131}
2132
2133static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
2134{
2135 void *ptr;
2136 struct ipa_rx_pkt_wrapper *rx_pkt;
2137 int ret;
2138 int rx_len_cached = 0;
2139
2140 rx_len_cached = sys->len;
2141
2142 while (rx_len_cached < sys->rx_pool_sz) {
2143 spin_lock_bh(&sys->spinlock);
2144 if (list_empty(&sys->rcycl_list))
2145 goto fail_kmem_cache_alloc;
2146
2147 rx_pkt = list_first_entry(&sys->rcycl_list,
2148 struct ipa_rx_pkt_wrapper, link);
2149 list_del(&rx_pkt->link);
2150 spin_unlock_bh(&sys->spinlock);
2151 INIT_LIST_HEAD(&rx_pkt->link);
2152 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2153 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
2154 ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
2155 if (rx_pkt->data.dma_addr == 0 ||
2156 rx_pkt->data.dma_addr == ~0)
2157 goto fail_dma_mapping;
2158
2159 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2160 rx_len_cached = ++sys->len;
2161
2162 ret = sps_transfer_one(sys->ep->ep_hdl,
2163 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2164
2165 if (ret) {
2166 IPAERR("sps_transfer_one failed %d\n", ret);
2167 goto fail_sps_transfer;
2168 }
2169 }
2170
2171 return;
2172fail_sps_transfer:
2173 rx_len_cached = --sys->len;
2174 list_del(&rx_pkt->link);
2175 INIT_LIST_HEAD(&rx_pkt->link);
2176 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2177 sys->rx_buff_sz, DMA_FROM_DEVICE);
2178fail_dma_mapping:
2179 spin_lock_bh(&sys->spinlock);
2180 list_add_tail(&rx_pkt->link, &sys->rcycl_list);
2181 INIT_LIST_HEAD(&rx_pkt->link);
2182 spin_unlock_bh(&sys->spinlock);
2183fail_kmem_cache_alloc:
2184 spin_unlock_bh(&sys->spinlock);
2185 if (rx_len_cached == 0)
2186 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2187 msecs_to_jiffies(1));
2188}
2189
2190static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
2191{
2192 struct ipa_rx_pkt_wrapper *rx_pkt;
2193 int ret;
2194 int rx_len_cached = 0;
2195 u32 curr;
2196
2197 rx_len_cached = sys->len;
2198 curr = atomic_read(&sys->repl.head_idx);
2199
2200 while (rx_len_cached < sys->rx_pool_sz) {
2201 if (curr == atomic_read(&sys->repl.tail_idx)) {
2202 queue_work(sys->repl_wq, &sys->repl_work);
2203 break;
2204 }
2205
2206 rx_pkt = sys->repl.cache[curr];
2207 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2208
2209 ret = sps_transfer_one(sys->ep->ep_hdl,
2210 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2211
2212 if (ret) {
2213 IPAERR("sps_transfer_one failed %d\n", ret);
2214 list_del(&rx_pkt->link);
2215 break;
2216 }
2217 rx_len_cached = ++sys->len;
2218 sys->repl_trig_cnt++;
2219 curr = (curr + 1) % sys->repl.capacity;
2220 /* ensure write is done before setting head index */
2221 mb();
2222 atomic_set(&sys->repl.head_idx, curr);
2223 }
2224
2225 if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0)
2226 queue_work(sys->repl_wq, &sys->repl_work);
2227
2228 if (rx_len_cached <= sys->ep->rx_replenish_threshold) {
2229 if (rx_len_cached == 0) {
2230 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
2231 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty);
2232 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
2233 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty);
2234 else
2235 WARN_ON(1);
2236 }
2237 sys->repl_trig_cnt = 0;
2238 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2239 msecs_to_jiffies(1));
2240 }
2241}
2242
2243static void replenish_rx_work_func(struct work_struct *work)
2244{
2245 struct delayed_work *dwork;
2246 struct ipa_sys_context *sys;
2247
2248 dwork = container_of(work, struct delayed_work, work);
2249 sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
2250 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2251 sys->repl_hdlr(sys);
2252 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2253}
2254
2255/**
2256 * ipa_cleanup_rx() - release RX queue resources
2257 *
2258 */
2259static void ipa_cleanup_rx(struct ipa_sys_context *sys)
2260{
2261 struct ipa_rx_pkt_wrapper *rx_pkt;
2262 struct ipa_rx_pkt_wrapper *r;
2263 u32 head;
2264 u32 tail;
2265
2266 list_for_each_entry_safe(rx_pkt, r,
2267 &sys->head_desc_list, link) {
2268 list_del(&rx_pkt->link);
2269 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2270 sys->rx_buff_sz, DMA_FROM_DEVICE);
2271 sys->free_skb(rx_pkt->data.skb);
2272 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2273 }
2274
2275 list_for_each_entry_safe(rx_pkt, r,
2276 &sys->rcycl_list, link) {
2277 list_del(&rx_pkt->link);
2278 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2279 sys->rx_buff_sz, DMA_FROM_DEVICE);
2280 sys->free_skb(rx_pkt->data.skb);
2281 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2282 }
2283
2284 if (sys->repl.cache) {
2285 head = atomic_read(&sys->repl.head_idx);
2286 tail = atomic_read(&sys->repl.tail_idx);
2287 while (head != tail) {
2288 rx_pkt = sys->repl.cache[head];
2289 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2290 sys->rx_buff_sz, DMA_FROM_DEVICE);
2291 sys->free_skb(rx_pkt->data.skb);
2292 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2293 head = (head + 1) % sys->repl.capacity;
2294 }
2295 kfree(sys->repl.cache);
2296 }
2297}
2298
2299static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
2300{
2301 struct sk_buff *skb2 = NULL;
2302
2303 skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
2304 if (likely(skb2)) {
2305 /* Set the data pointer */
2306 skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
2307 memcpy(skb2->data, skb->data, len);
2308 skb2->len = len;
2309 skb_set_tail_pointer(skb2, len);
2310 }
2311
2312 return skb2;
2313}
2314
2315static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
2316 struct ipa_sys_context *sys)
2317{
2318 int rc = 0;
2319 struct ipa_hw_pkt_status *status;
2320 struct sk_buff *skb2;
2321 int pad_len_byte;
2322 int len;
2323 unsigned char *buf;
2324 int src_pipe;
2325 unsigned int used = *(unsigned int *)skb->cb;
2326 unsigned int used_align = ALIGN(used, 32);
2327 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
Skylar Change985d272016-12-05 18:10:19 -08002328 u32 skb2_len;
Amir Levy9659e592016-10-27 18:08:27 +03002329
2330 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2331
2332 if (skb->len == 0) {
2333 IPAERR("ZLT\n");
2334 sys->free_skb(skb);
2335 return rc;
2336 }
2337
2338 if (sys->len_partial) {
2339 IPADBG("len_partial %d\n", sys->len_partial);
2340 buf = skb_push(skb, sys->len_partial);
2341 memcpy(buf, sys->prev_skb->data, sys->len_partial);
2342 sys->len_partial = 0;
2343 sys->free_skb(sys->prev_skb);
2344 sys->prev_skb = NULL;
2345 goto begin;
2346 }
2347
2348 /* this pipe has TX comp (status only) + mux-ed LAN RX data
2349 * (status+data)
2350 */
2351 if (sys->len_rem) {
2352 IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
2353 sys->len_pad);
2354 if (sys->len_rem <= skb->len) {
2355 if (sys->prev_skb) {
2356 skb2 = skb_copy_expand(sys->prev_skb, 0,
2357 sys->len_rem, GFP_KERNEL);
2358 if (likely(skb2)) {
2359 memcpy(skb_put(skb2, sys->len_rem),
2360 skb->data, sys->len_rem);
2361 skb_trim(skb2,
2362 skb2->len - sys->len_pad);
2363 skb2->truesize = skb2->len +
2364 sizeof(struct sk_buff);
2365 if (sys->drop_packet)
2366 dev_kfree_skb_any(skb2);
2367 else
2368 sys->ep->client_notify(
2369 sys->ep->priv,
2370 IPA_RECEIVE,
2371 (unsigned long)(skb2));
2372 } else {
2373 IPAERR("copy expand failed\n");
2374 }
2375 dev_kfree_skb_any(sys->prev_skb);
2376 }
2377 skb_pull(skb, sys->len_rem);
2378 sys->prev_skb = NULL;
2379 sys->len_rem = 0;
2380 sys->len_pad = 0;
2381 } else {
2382 if (sys->prev_skb) {
2383 skb2 = skb_copy_expand(sys->prev_skb, 0,
2384 skb->len, GFP_KERNEL);
2385 if (likely(skb2)) {
2386 memcpy(skb_put(skb2, skb->len),
2387 skb->data, skb->len);
2388 } else {
2389 IPAERR("copy expand failed\n");
2390 }
2391 dev_kfree_skb_any(sys->prev_skb);
2392 sys->prev_skb = skb2;
2393 }
2394 sys->len_rem -= skb->len;
2395 sys->free_skb(skb);
2396 return rc;
2397 }
2398 }
2399
2400begin:
2401 while (skb->len) {
2402 sys->drop_packet = false;
2403 IPADBG("LEN_REM %d\n", skb->len);
2404
2405 if (skb->len < IPA_PKT_STATUS_SIZE) {
2406 WARN_ON(sys->prev_skb != NULL);
2407 IPADBG("status straddles buffer\n");
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002408 sys->prev_skb = skb_copy(skb, GFP_KERNEL);
Amir Levy9659e592016-10-27 18:08:27 +03002409 sys->len_partial = skb->len;
2410 return rc;
2411 }
2412
2413 status = (struct ipa_hw_pkt_status *)skb->data;
2414 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2415 status->status_opcode, status->endp_src_idx,
2416 status->endp_dest_idx, status->pkt_len);
2417 if (sys->status_stat) {
2418 sys->status_stat->status[sys->status_stat->curr] =
2419 *status;
2420 sys->status_stat->curr++;
2421 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2422 sys->status_stat->curr = 0;
2423 }
2424
2425 if (status->status_opcode !=
2426 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2427 status->status_opcode !=
2428 IPA_HW_STATUS_OPCODE_PACKET &&
2429 status->status_opcode !=
2430 IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET &&
2431 status->status_opcode !=
2432 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2433 IPAERR("unsupported opcode(%d)\n",
2434 status->status_opcode);
2435 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2436 continue;
2437 }
2438 IPA_STATS_EXCP_CNT(status->exception,
2439 ipa_ctx->stats.rx_excp_pkts);
2440 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2441 status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
2442 IPAERR("status fields invalid\n");
2443 IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
2444 status->status_opcode, status->endp_src_idx,
2445 status->endp_dest_idx, status->pkt_len);
2446 WARN_ON(1);
2447 BUG();
2448 }
2449 if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
2450 struct ipa_tag_completion *comp;
2451
2452 IPADBG("TAG packet arrived\n");
2453 if (status->tag_f_2 == IPA_COOKIE) {
2454 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2455 if (skb->len < sizeof(comp)) {
2456 IPAERR("TAG arrived without packet\n");
2457 return rc;
2458 }
2459 memcpy(&comp, skb->data, sizeof(comp));
2460 skb_pull(skb, sizeof(comp) +
2461 IPA_SIZE_DL_CSUM_META_TRAILER);
2462 complete(&comp->comp);
2463 if (atomic_dec_return(&comp->cnt) == 0)
2464 kfree(comp);
2465 continue;
2466 } else {
2467 IPADBG("ignoring TAG with wrong cookie\n");
2468 }
2469 }
2470 if (status->pkt_len == 0) {
2471 IPADBG("Skip aggr close status\n");
2472 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2473 IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close);
2474 IPA_STATS_DEC_CNT(
2475 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2476 continue;
2477 }
2478 if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
2479 /* RX data */
2480 src_pipe = status->endp_src_idx;
2481
2482 /*
2483 * A packet which is received back to the AP after
2484 * there was no route match.
2485 */
2486 if (!status->exception && !status->route_match)
2487 sys->drop_packet = true;
2488
2489 if (skb->len == IPA_PKT_STATUS_SIZE &&
2490 !status->exception) {
2491 WARN_ON(sys->prev_skb != NULL);
2492 IPADBG("Ins header in next buffer\n");
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002493 sys->prev_skb = skb_copy(skb, GFP_KERNEL);
Amir Levy9659e592016-10-27 18:08:27 +03002494 sys->len_partial = skb->len;
2495 return rc;
2496 }
2497
2498 pad_len_byte = ((status->pkt_len + 3) & ~3) -
2499 status->pkt_len;
2500
2501 len = status->pkt_len + pad_len_byte +
2502 IPA_SIZE_DL_CSUM_META_TRAILER;
2503 IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
2504 status->pkt_len, len);
2505
2506 if (status->exception ==
2507 IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
2508 IPADBG("Dropping packet on DeAggr Exception\n");
2509 sys->drop_packet = true;
2510 }
2511
Skylar Change985d272016-12-05 18:10:19 -08002512 skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE;
2513 skb2_len = min(skb2_len, skb->len);
2514 skb2 = ipa_skb_copy_for_client(skb, skb2_len);
Amir Levy9659e592016-10-27 18:08:27 +03002515 if (likely(skb2)) {
2516 if (skb->len < len + IPA_PKT_STATUS_SIZE) {
2517 IPADBG("SPL skb len %d len %d\n",
2518 skb->len, len);
2519 sys->prev_skb = skb2;
2520 sys->len_rem = len - skb->len +
2521 IPA_PKT_STATUS_SIZE;
2522 sys->len_pad = pad_len_byte;
2523 skb_pull(skb, skb->len);
2524 } else {
2525 skb_trim(skb2, status->pkt_len +
2526 IPA_PKT_STATUS_SIZE);
2527 IPADBG("rx avail for %d\n",
2528 status->endp_dest_idx);
2529 if (sys->drop_packet) {
2530 dev_kfree_skb_any(skb2);
2531 } else if (status->pkt_len >
2532 IPA_GENERIC_AGGR_BYTE_LIMIT *
2533 1024) {
2534 IPAERR("packet size invalid\n");
2535 IPAERR("STATUS opcode=%d\n",
2536 status->status_opcode);
2537 IPAERR("src=%d dst=%d len=%d\n",
2538 status->endp_src_idx,
2539 status->endp_dest_idx,
2540 status->pkt_len);
2541 BUG();
2542 } else {
2543 skb2->truesize = skb2->len +
2544 sizeof(struct sk_buff) +
2545 (ALIGN(len +
2546 IPA_PKT_STATUS_SIZE, 32) *
2547 unused / used_align);
2548 sys->ep->client_notify(
2549 sys->ep->priv,
2550 IPA_RECEIVE,
2551 (unsigned long)(skb2));
2552 }
2553 skb_pull(skb, len +
2554 IPA_PKT_STATUS_SIZE);
2555 }
2556 } else {
2557 IPAERR("fail to alloc skb\n");
2558 if (skb->len < len) {
2559 sys->prev_skb = NULL;
2560 sys->len_rem = len - skb->len +
2561 IPA_PKT_STATUS_SIZE;
2562 sys->len_pad = pad_len_byte;
2563 skb_pull(skb, skb->len);
2564 } else {
2565 skb_pull(skb, len +
2566 IPA_PKT_STATUS_SIZE);
2567 }
2568 }
2569 /* TX comp */
2570 ipa_wq_write_done_status(src_pipe);
2571 IPADBG("tx comp imp for %d\n", src_pipe);
2572 } else {
2573 /* TX comp */
2574 ipa_wq_write_done_status(status->endp_src_idx);
2575 IPADBG("tx comp exp for %d\n", status->endp_src_idx);
2576 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2577 IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl);
2578 IPA_STATS_DEC_CNT(
2579 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2580 }
2581 };
2582
2583 sys->free_skb(skb);
2584 return rc;
2585}
2586
2587static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb,
2588 struct sk_buff *skb, unsigned int len)
2589{
2590 struct sk_buff *skb2;
2591
2592 skb2 = skb_copy_expand(prev_skb, 0,
2593 len, GFP_KERNEL);
2594 if (likely(skb2)) {
2595 memcpy(skb_put(skb2, len),
2596 skb->data, len);
2597 } else {
2598 IPAERR("copy expand failed\n");
2599 skb2 = NULL;
2600 }
2601 dev_kfree_skb_any(prev_skb);
2602
2603 return skb2;
2604}
2605
2606static void wan_rx_handle_splt_pyld(struct sk_buff *skb,
2607 struct ipa_sys_context *sys)
2608{
2609 struct sk_buff *skb2;
2610
2611 IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
2612 if (sys->len_rem <= skb->len) {
2613 if (sys->prev_skb) {
2614 skb2 = join_prev_skb(sys->prev_skb, skb,
2615 sys->len_rem);
2616 if (likely(skb2)) {
2617 IPADBG(
2618 "removing Status element from skb and sending to WAN client");
2619 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2620 skb2->truesize = skb2->len +
2621 sizeof(struct sk_buff);
2622 sys->ep->client_notify(sys->ep->priv,
2623 IPA_RECEIVE,
2624 (unsigned long)(skb2));
2625 }
2626 }
2627 skb_pull(skb, sys->len_rem);
2628 sys->prev_skb = NULL;
2629 sys->len_rem = 0;
2630 } else {
2631 if (sys->prev_skb) {
2632 skb2 = join_prev_skb(sys->prev_skb, skb,
2633 skb->len);
2634 sys->prev_skb = skb2;
2635 }
2636 sys->len_rem -= skb->len;
2637 skb_pull(skb, skb->len);
2638 }
2639}
2640
2641static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
2642 struct ipa_sys_context *sys)
2643{
2644 int rc = 0;
2645 struct ipa_hw_pkt_status *status;
2646 struct sk_buff *skb2;
2647 u16 pkt_len_with_pad;
2648 u32 qmap_hdr;
2649 int checksum_trailer_exists;
2650 int frame_len;
2651 int ep_idx;
2652 unsigned int used = *(unsigned int *)skb->cb;
2653 unsigned int used_align = ALIGN(used, 32);
2654 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2655
2656 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2657 if (skb->len == 0) {
2658 IPAERR("ZLT\n");
2659 goto bail;
2660 }
2661
2662 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
2663 sys->ep->client_notify(sys->ep->priv,
2664 IPA_RECEIVE, (unsigned long)(skb));
2665 return rc;
2666 }
2667 if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
2668 IPAERR("Recycle should enable only with GRO Aggr\n");
2669 ipa_assert();
2670 }
2671 /*
2672 * payload splits across 2 buff or more,
2673 * take the start of the payload from prev_skb
2674 */
2675 if (sys->len_rem)
2676 wan_rx_handle_splt_pyld(skb, sys);
2677
2678
2679 while (skb->len) {
2680 IPADBG("LEN_REM %d\n", skb->len);
2681 if (skb->len < IPA_PKT_STATUS_SIZE) {
2682 IPAERR("status straddles buffer\n");
2683 WARN_ON(1);
2684 goto bail;
2685 }
2686 status = (struct ipa_hw_pkt_status *)skb->data;
2687 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2688 status->status_opcode, status->endp_src_idx,
2689 status->endp_dest_idx, status->pkt_len);
2690
2691 if (sys->status_stat) {
2692 sys->status_stat->status[sys->status_stat->curr] =
2693 *status;
2694 sys->status_stat->curr++;
2695 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2696 sys->status_stat->curr = 0;
2697 }
2698
2699 if (status->status_opcode !=
2700 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2701 status->status_opcode !=
2702 IPA_HW_STATUS_OPCODE_PACKET &&
2703 status->status_opcode !=
2704 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2705 IPAERR("unsupported opcode\n");
2706 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2707 continue;
2708 }
2709 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2710 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2711 status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
2712 status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
2713 IPAERR("status fields invalid\n");
2714 WARN_ON(1);
2715 goto bail;
2716 }
2717 if (status->pkt_len == 0) {
2718 IPADBG("Skip aggr close status\n");
2719 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2720 IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts);
2721 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close);
2722 continue;
2723 }
2724 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
2725 if (status->endp_dest_idx != ep_idx) {
2726 IPAERR("expected endp_dest_idx %d received %d\n",
2727 ep_idx, status->endp_dest_idx);
2728 WARN_ON(1);
2729 goto bail;
2730 }
2731 /* RX data */
2732 if (skb->len == IPA_PKT_STATUS_SIZE) {
2733 IPAERR("Ins header in next buffer\n");
2734 WARN_ON(1);
2735 goto bail;
2736 }
2737 qmap_hdr = *(u32 *)(status+1);
2738 /*
2739 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
2740 * header
2741 */
2742
2743 /*QMAP is BE: convert the pkt_len field from BE to LE*/
2744 pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
2745 IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
2746 /*get the CHECKSUM_PROCESS bit*/
2747 checksum_trailer_exists = status->status_mask &
2748 IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
2749 IPADBG("checksum_trailer_exists %d\n",
2750 checksum_trailer_exists);
2751
2752 frame_len = IPA_PKT_STATUS_SIZE +
2753 IPA_QMAP_HEADER_LENGTH +
2754 pkt_len_with_pad;
2755 if (checksum_trailer_exists)
2756 frame_len += IPA_DL_CHECKSUM_LENGTH;
2757 IPADBG("frame_len %d\n", frame_len);
2758
2759 skb2 = skb_clone(skb, GFP_KERNEL);
2760 if (likely(skb2)) {
2761 /*
2762 * the len of actual data is smaller than expected
2763 * payload split across 2 buff
2764 */
2765 if (skb->len < frame_len) {
2766 IPADBG("SPL skb len %d len %d\n",
2767 skb->len, frame_len);
2768 sys->prev_skb = skb2;
2769 sys->len_rem = frame_len - skb->len;
2770 skb_pull(skb, skb->len);
2771 } else {
2772 skb_trim(skb2, frame_len);
2773 IPADBG("rx avail for %d\n",
2774 status->endp_dest_idx);
2775 IPADBG(
2776 "removing Status element from skb and sending to WAN client");
2777 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2778 skb2->truesize = skb2->len +
2779 sizeof(struct sk_buff) +
2780 (ALIGN(frame_len, 32) *
2781 unused / used_align);
2782 sys->ep->client_notify(sys->ep->priv,
2783 IPA_RECEIVE, (unsigned long)(skb2));
2784 skb_pull(skb, frame_len);
2785 }
2786 } else {
2787 IPAERR("fail to clone\n");
2788 if (skb->len < frame_len) {
2789 sys->prev_skb = NULL;
2790 sys->len_rem = frame_len - skb->len;
2791 skb_pull(skb, skb->len);
2792 } else {
2793 skb_pull(skb, frame_len);
2794 }
2795 }
2796 };
2797bail:
2798 sys->free_skb(skb);
2799 return rc;
2800}
2801
2802static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys)
2803{
2804 struct ipa_a5_mux_hdr *mux_hdr;
2805 unsigned int pull_len;
2806 unsigned int padding;
2807 struct ipa_ep_context *ep;
2808 unsigned int src_pipe;
2809
2810 mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
2811
2812 src_pipe = mux_hdr->src_pipe_index;
2813
2814 IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
2815 rx_skb->len, ntohs(mux_hdr->interface_id),
2816 src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));
2817
2818 IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
2819
2820 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2821 IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
2822
2823 /*
2824 * Any packets arriving over AMPDU_TX should be dispatched
2825 * to the regular WLAN RX data-path.
2826 */
2827 if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
2828 src_pipe = WLAN_PROD_TX_EP;
2829
2830 ep = &ipa_ctx->ep[src_pipe];
2831 spin_lock(&ipa_ctx->disconnect_lock);
2832 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2833 !ep->valid || !ep->client_notify)) {
2834 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2835 src_pipe, ep->valid, ep->client_notify);
2836 dev_kfree_skb_any(rx_skb);
2837 spin_unlock(&ipa_ctx->disconnect_lock);
2838 return 0;
2839 }
2840
2841 pull_len = sizeof(struct ipa_a5_mux_hdr);
2842
2843 /*
2844 * IP packet starts on word boundary
2845 * remove the MUX header and any padding and pass the frame to
2846 * the client which registered a rx callback on the "src pipe"
2847 */
2848 padding = ep->cfg.hdr.hdr_len & 0x3;
2849 if (padding)
2850 pull_len += 4 - padding;
2851
2852 IPADBG("pulling %d bytes from skb\n", pull_len);
2853 skb_pull(rx_skb, pull_len);
2854 ep->client_notify(ep->priv, IPA_RECEIVE,
2855 (unsigned long)(rx_skb));
2856 spin_unlock(&ipa_ctx->disconnect_lock);
2857 return 0;
2858}
2859
2860static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags)
2861{
2862 return __dev_alloc_skb(len, flags);
2863}
2864
2865static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len,
2866 gfp_t flags)
2867{
2868 struct sk_buff *skb;
2869
2870 skb = __dev_alloc_skb(len + IPA_HEADROOM, flags);
2871 if (skb)
2872 skb_reserve(skb, IPA_HEADROOM);
2873
2874 return skb;
2875}
2876
2877static void ipa_free_skb_rx(struct sk_buff *skb)
2878{
2879 dev_kfree_skb_any(skb);
2880}
2881
2882void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
2883{
2884 struct sk_buff *rx_skb = (struct sk_buff *)data;
2885 struct ipa_hw_pkt_status *status;
2886 struct ipa_ep_context *ep;
2887 unsigned int src_pipe;
2888 u32 metadata;
2889
2890 status = (struct ipa_hw_pkt_status *)rx_skb->data;
2891 src_pipe = status->endp_src_idx;
2892 metadata = status->metadata;
2893 ep = &ipa_ctx->ep[src_pipe];
2894 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2895 !ep->valid ||
2896 !ep->client_notify)) {
2897 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2898 src_pipe, ep->valid, ep->client_notify);
2899 dev_kfree_skb_any(rx_skb);
2900 return;
2901 }
2902 if (!status->exception)
2903 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE +
2904 IPA_LAN_RX_HEADER_LENGTH);
2905 else
2906 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE);
2907
2908 /*
2909 * Metadata Info
2910 * ------------------------------------------
2911 * | 3 | 2 | 1 | 0 |
2912 * | fw_desc | vdev_id | qmap mux id | Resv |
2913 * ------------------------------------------
2914 */
2915 *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
2916 IPADBG("meta_data: 0x%x cb: 0x%x\n",
2917 metadata, *(u32 *)rx_skb->cb);
2918
2919 ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
2920}
2921
2922void ipa2_recycle_wan_skb(struct sk_buff *skb)
2923{
2924 struct ipa_rx_pkt_wrapper *rx_pkt;
2925 int ep_idx = ipa2_get_ep_mapping(
2926 IPA_CLIENT_APPS_WAN_CONS);
2927 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2928 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2929
2930 if (unlikely(ep_idx == -1)) {
2931 IPAERR("dest EP does not exist\n");
2932 ipa_assert();
2933 }
2934
2935 rx_pkt = kmem_cache_zalloc(
2936 ipa_ctx->rx_pkt_wrapper_cache, flag);
2937 if (!rx_pkt)
2938 ipa_assert();
2939
2940 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2941 rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
2942
2943 rx_pkt->data.skb = skb;
2944 rx_pkt->data.dma_addr = 0;
2945 ipa_skb_recycle(rx_pkt->data.skb);
2946 skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
2947 INIT_LIST_HEAD(&rx_pkt->link);
2948 spin_lock_bh(&rx_pkt->sys->spinlock);
2949 list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
2950 spin_unlock_bh(&rx_pkt->sys->spinlock);
2951}
2952
2953static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2954{
2955 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2956 struct sk_buff *rx_skb;
2957
2958 if (unlikely(list_empty(&sys->head_desc_list))) {
2959 WARN_ON(1);
2960 return;
2961 }
2962 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2963 struct ipa_rx_pkt_wrapper,
2964 link);
2965 list_del(&rx_pkt_expected->link);
2966 sys->len--;
2967 if (size)
2968 rx_pkt_expected->len = size;
2969 rx_skb = rx_pkt_expected->data.skb;
2970 dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
2971 sys->rx_buff_sz, DMA_FROM_DEVICE);
2972 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2973 rx_skb->len = rx_pkt_expected->len;
2974 *(unsigned int *)rx_skb->cb = rx_skb->len;
2975 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2976 sys->pyld_hdlr(rx_skb, sys);
2977 sys->repl_hdlr(sys);
2978 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
2979
2980}
2981
2982static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2983{
2984 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2985 struct sk_buff *rx_skb;
2986
2987 if (unlikely(list_empty(&sys->head_desc_list))) {
2988 WARN_ON(1);
2989 return;
2990 }
2991 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2992 struct ipa_rx_pkt_wrapper,
2993 link);
2994 list_del(&rx_pkt_expected->link);
2995 sys->len--;
2996
2997 if (size)
2998 rx_pkt_expected->len = size;
2999
3000 rx_skb = rx_pkt_expected->data.skb;
3001 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
3002 rx_skb->len = rx_pkt_expected->len;
3003 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
3004 sys->ep->wstats.tx_pkts_rcvd++;
3005 if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
3006 ipa2_free_skb(&rx_pkt_expected->data);
3007 sys->ep->wstats.tx_pkts_dropped++;
3008 } else {
3009 sys->ep->wstats.tx_pkts_sent++;
3010 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3011 (unsigned long)(&rx_pkt_expected->data));
3012 }
3013 ipa_replenish_wlan_rx_cache(sys);
3014}
3015
3016static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
3017 struct sps_iovec *iovec)
3018{
3019 IPADBG("ENTER.\n");
3020 if (unlikely(list_empty(&sys->head_desc_list))) {
3021 IPAERR("descriptor list is empty!\n");
3022 WARN_ON(1);
3023 return;
3024 }
3025 if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) {
3026 IPAERR("received unexpected event. sps flag is 0x%x\n"
3027 , iovec->flags);
3028 WARN_ON(1);
3029 return;
3030 }
3031 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3032 (unsigned long)(iovec));
3033 IPADBG("EXIT\n");
3034}
3035
3036static void ipa_wq_rx_avail(struct work_struct *work)
3037{
3038 struct ipa_rx_pkt_wrapper *rx_pkt;
3039 struct ipa_sys_context *sys;
3040
3041 rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work);
3042 if (unlikely(rx_pkt == NULL))
3043 WARN_ON(1);
3044 sys = rx_pkt->sys;
3045 ipa_wq_rx_common(sys, 0);
3046}
3047
3048/**
3049 * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
3050 * the SPS driver after a Rx operation is complete.
3051 * Called in an interrupt context.
3052 * @notify: SPS driver supplied notification struct
3053 *
3054 * This function defer the work for this event to a workqueue.
3055 */
3056void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
3057{
3058 struct ipa_rx_pkt_wrapper *rx_pkt;
3059
3060 switch (notify->event_id) {
3061 case SPS_EVENT_EOT:
3062 rx_pkt = notify->data.transfer.user;
3063 if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
3064 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
3065 rx_pkt->len = notify->data.transfer.iovec.size;
3066 IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
3067 notify->user, rx_pkt->len);
3068 queue_work(rx_pkt->sys->wq, &rx_pkt->work);
3069 break;
3070 default:
3071 IPAERR("received unexpected event id %d sys=%p\n",
3072 notify->event_id, notify->user);
3073 }
3074}
3075
3076static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
3077 struct ipa_sys_context *sys)
3078{
3079 if (sys->ep->client_notify) {
3080 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3081 (unsigned long)(rx_skb));
3082 } else {
3083 dev_kfree_skb_any(rx_skb);
3084 WARN_ON(1);
3085 }
3086
3087 return 0;
3088}
3089
3090static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
3091 struct ipa_sys_context *sys)
3092{
3093 unsigned long int aggr_byte_limit;
3094
3095 sys->ep->status.status_en = true;
3096 sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
3097 if (IPA_CLIENT_IS_PROD(in->client)) {
3098 if (!sys->ep->skip_ep_cfg) {
3099 sys->policy = IPA_POLICY_NOINTR_MODE;
3100 sys->sps_option = SPS_O_AUTO_ENABLE;
3101 sys->sps_callback = NULL;
3102 sys->ep->status.status_ep = ipa2_get_ep_mapping(
3103 IPA_CLIENT_APPS_LAN_CONS);
3104 if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
3105 sys->ep->status.status_en = false;
3106 } else {
3107 sys->policy = IPA_POLICY_INTR_MODE;
3108 sys->sps_option = (SPS_O_AUTO_ENABLE |
3109 SPS_O_EOT);
3110 sys->sps_callback =
3111 ipa_sps_irq_tx_no_aggr_notify;
3112 }
3113 return 0;
3114 }
3115
3116 aggr_byte_limit =
3117 (unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
3118 ipa_adjust_ra_buff_base_sz(
3119 in->ipa_ep_cfg.aggr.aggr_byte_limit));
3120
3121 if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
3122 in->client == IPA_CLIENT_APPS_WAN_CONS) {
3123 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3124 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3125 | SPS_O_ACK_TRANSFERS);
3126 sys->sps_callback = ipa_sps_irq_rx_notify;
3127 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3128 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3129 switch_to_intr_rx_work_func);
3130 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3131 replenish_rx_work_func);
3132 INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
3133 atomic_set(&sys->curr_polling_state, 0);
3134 sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
3135 IPA_GENERIC_RX_BUFF_BASE_SZ) -
3136 IPA_HEADROOM;
3137 sys->get_skb = ipa_get_skb_ipa_rx_headroom;
3138 sys->free_skb = ipa_free_skb_rx;
3139 in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
3140 in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
3141 in->ipa_ep_cfg.aggr.aggr_time_limit =
3142 IPA_GENERIC_AGGR_TIME_LIMIT;
3143 if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3144 sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
3145 if (nr_cpu_ids > 1) {
3146 sys->repl_hdlr =
3147 ipa_fast_replenish_rx_cache;
3148 sys->repl_trig_thresh =
3149 sys->rx_pool_sz / 8;
3150 } else {
3151 sys->repl_hdlr =
3152 ipa_replenish_rx_cache;
3153 }
3154 sys->rx_pool_sz =
3155 ipa_ctx->lan_rx_ring_size;
3156 in->ipa_ep_cfg.aggr.aggr_byte_limit =
3157 IPA_GENERIC_AGGR_BYTE_LIMIT;
3158 in->ipa_ep_cfg.aggr.aggr_pkt_limit =
3159 IPA_GENERIC_AGGR_PKT_LIMIT;
3160 sys->ep->wakelock_client =
3161 IPA_WAKELOCK_REF_CLIENT_LAN_RX;
3162 } else if (in->client ==
3163 IPA_CLIENT_APPS_WAN_CONS) {
3164 sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003165 sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
3166 if (nr_cpu_ids > 1) {
Amir Levy9659e592016-10-27 18:08:27 +03003167 sys->repl_hdlr =
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003168 ipa_fast_replenish_rx_cache;
3169 sys->repl_trig_thresh =
3170 sys->rx_pool_sz / 8;
Amir Levy9659e592016-10-27 18:08:27 +03003171 } else {
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003172 sys->repl_hdlr =
3173 ipa_replenish_rx_cache;
3174 }
3175 if (in->napi_enabled) {
Amir Levy9659e592016-10-27 18:08:27 +03003176 sys->rx_pool_sz =
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003177 IPA_WAN_NAPI_CONS_RX_POOL_SZ;
3178 if (in->recycle_enabled) {
3179 sys->repl_hdlr =
3180 ipa_replenish_rx_cache_recycle;
3181 }
Amir Levy9659e592016-10-27 18:08:27 +03003182 }
3183 sys->ep->wakelock_client =
3184 IPA_WAKELOCK_REF_CLIENT_WAN_RX;
3185 in->ipa_ep_cfg.aggr.aggr_sw_eof_active
3186 = true;
3187 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
3188 IPAERR("get close-by %u\n",
3189 ipa_adjust_ra_buff_base_sz(
3190 in->ipa_ep_cfg.aggr.
3191 aggr_byte_limit));
3192 IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
3193 /* disable ipa_status */
3194 sys->ep->status.
3195 status_en = false;
3196 sys->rx_buff_sz =
3197 IPA_GENERIC_RX_BUFF_SZ(
3198 ipa_adjust_ra_buff_base_sz(
3199 in->ipa_ep_cfg.aggr.
3200 aggr_byte_limit - IPA_HEADROOM));
3201 in->ipa_ep_cfg.aggr.
3202 aggr_byte_limit =
3203 sys->rx_buff_sz < in->
3204 ipa_ep_cfg.aggr.aggr_byte_limit ?
3205 IPA_ADJUST_AGGR_BYTE_LIMIT(
3206 sys->rx_buff_sz) :
3207 IPA_ADJUST_AGGR_BYTE_LIMIT(
3208 in->ipa_ep_cfg.
3209 aggr.aggr_byte_limit);
3210 IPAERR("set aggr_limit %lu\n",
3211 (unsigned long int)
3212 in->ipa_ep_cfg.aggr.
3213 aggr_byte_limit);
3214 } else {
3215 in->ipa_ep_cfg.aggr.
3216 aggr_byte_limit =
3217 IPA_GENERIC_AGGR_BYTE_LIMIT;
3218 in->ipa_ep_cfg.aggr.
3219 aggr_pkt_limit =
3220 IPA_GENERIC_AGGR_PKT_LIMIT;
3221 }
3222 }
3223 } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
3224 IPADBG("assigning policy to client:%d",
3225 in->client);
3226
3227 sys->ep->status.status_en = false;
3228 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3229 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3230 | SPS_O_ACK_TRANSFERS);
3231 sys->sps_callback = ipa_sps_irq_rx_notify;
3232 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3233 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3234 switch_to_intr_rx_work_func);
3235 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3236 replenish_rx_work_func);
3237 atomic_set(&sys->curr_polling_state, 0);
3238 sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
3239 sys->rx_pool_sz = in->desc_fifo_sz /
3240 sizeof(struct sps_iovec) - 1;
3241 if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
3242 sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
3243 sys->pyld_hdlr = NULL;
3244 sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
3245 sys->get_skb = ipa_get_skb_ipa_rx;
3246 sys->free_skb = ipa_free_skb_rx;
3247 in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
3248 sys->ep->wakelock_client =
3249 IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
3250 } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
3251 IPADBG("assigning policy to client:%d",
3252 in->client);
3253
3254 sys->ep->status.status_en = false;
3255 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3256 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3257 | SPS_O_ACK_TRANSFERS);
3258 sys->sps_callback = ipa_sps_irq_rx_notify;
3259 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3260 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3261 switch_to_intr_rx_work_func);
3262 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3263 replenish_rx_work_func);
3264 atomic_set(&sys->curr_polling_state, 0);
3265 sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
3266 sys->rx_pool_sz = in->desc_fifo_sz /
3267 sizeof(struct sps_iovec) - 1;
3268 if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
3269 sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
3270 sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
3271 sys->get_skb = ipa_get_skb_ipa_rx;
3272 sys->free_skb = ipa_free_skb_rx;
3273 sys->repl_hdlr = ipa_replenish_rx_cache;
3274 sys->ep->wakelock_client =
3275 IPA_WAKELOCK_REF_CLIENT_ODU_RX;
3276 } else if (in->client ==
3277 IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
3278 IPADBG("assigning policy to client:%d",
3279 in->client);
3280 sys->ep->status.status_en = false;
3281 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3282 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3283 | SPS_O_ACK_TRANSFERS);
3284 sys->sps_callback = ipa_sps_irq_rx_notify;
3285 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3286 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3287 switch_to_intr_rx_work_func);
3288 } else if (in->client ==
3289 IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
3290 IPADBG("assigning policy to client:%d",
3291 in->client);
3292 sys->ep->status.status_en = false;
3293 sys->policy = IPA_POLICY_NOINTR_MODE;
3294 sys->sps_option = SPS_O_AUTO_ENABLE |
3295 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
3296 } else {
3297 IPAERR("Need to install a RX pipe hdlr\n");
3298 WARN_ON(1);
3299 return -EINVAL;
3300 }
3301 return 0;
3302}
3303
3304static int ipa_assign_policy(struct ipa_sys_connect_params *in,
3305 struct ipa_sys_context *sys)
3306{
3307 if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
3308 sys->policy = IPA_POLICY_INTR_MODE;
3309 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3310 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3311 return 0;
3312 }
3313
3314 if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
3315 if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) {
3316 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3317 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3318 SPS_O_ACK_TRANSFERS);
3319 sys->sps_callback = ipa_sps_irq_tx_notify;
3320 INIT_WORK(&sys->work, ipa_wq_handle_tx);
3321 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3322 switch_to_intr_tx_work_func);
3323 atomic_set(&sys->curr_polling_state, 0);
3324 } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3325 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3326 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3327 SPS_O_ACK_TRANSFERS);
3328 sys->sps_callback = ipa_sps_irq_rx_notify;
3329 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3330 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3331 switch_to_intr_rx_work_func);
3332 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3333 replenish_rx_work_func);
3334 atomic_set(&sys->curr_polling_state, 0);
3335 sys->rx_buff_sz = IPA_RX_SKB_SIZE;
3336 sys->rx_pool_sz = IPA_RX_POOL_CEIL;
3337 sys->pyld_hdlr = ipa_rx_pyld_hdlr;
3338 sys->get_skb = ipa_get_skb_ipa_rx;
3339 sys->free_skb = ipa_free_skb_rx;
3340 sys->repl_hdlr = ipa_replenish_rx_cache;
3341 } else if (IPA_CLIENT_IS_PROD(in->client)) {
3342 sys->policy = IPA_POLICY_INTR_MODE;
3343 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3344 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3345 } else {
3346 IPAERR("Need to install a RX pipe hdlr\n");
3347 WARN_ON(1);
3348 return -EINVAL;
3349 }
3350
3351 return 0;
3352 } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3353 return ipa_assign_policy_v2(in, sys);
3354
3355 IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
3356 WARN_ON(1);
3357 return -EINVAL;
3358}
3359
3360/**
3361 * ipa_tx_client_rx_notify_release() - Callback function
3362 * which will call the user supplied callback function to
3363 * release the skb, or release it on its own if no callback
3364 * function was supplied
3365 *
3366 * @user1: [in] - Data Descriptor
3367 * @user2: [in] - endpoint idx
3368 *
3369 * This notified callback is for the destination client
3370 * This function is supplied in ipa_tx_dp_mul
3371 */
3372static void ipa_tx_client_rx_notify_release(void *user1, int user2)
3373{
3374 struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
3375 int ep_idx = user2;
3376
3377 IPADBG("Received data desc anchor:%p\n", dd);
3378
3379 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3380 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3381
3382 /* wlan host driver waits till tx complete before unload */
3383 IPADBG("ep=%d fifo_desc_free_count=%d\n",
3384 ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc));
3385 IPADBG("calling client notify callback with priv:%p\n",
3386 ipa_ctx->ep[ep_idx].priv);
3387
3388 if (ipa_ctx->ep[ep_idx].client_notify) {
3389 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
3390 IPA_WRITE_DONE, (unsigned long)user1);
3391 ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++;
3392 }
3393}
3394/**
3395 * ipa_tx_client_rx_pkt_status() - Callback function
3396 * which will call the user supplied callback function to
3397 * increase the available fifo descriptor
3398 *
3399 * @user1: [in] - Data Descriptor
3400 * @user2: [in] - endpoint idx
3401 *
3402 * This notified callback is for the destination client
3403 * This function is supplied in ipa_tx_dp_mul
3404 */
3405static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
3406{
3407 int ep_idx = user2;
3408
3409 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3410 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3411}
3412
3413
3414/**
3415 * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
3416 * @src: [in] - Client that is sending data
3417 * @ipa_tx_data_desc: [in] data descriptors from wlan
3418 *
3419 * this is used for to transfer data descriptors that received
3420 * from WLAN1_PROD pipe to IPA HW
3421 *
3422 * The function will send data descriptors from WLAN1_PROD (one
3423 * at a time) using sps_transfer_one. Will set EOT flag for last
3424 * descriptor Once this send was done from SPS point-of-view the
3425 * IPA driver will get notified by the supplied callback -
3426 * ipa_sps_irq_tx_no_aggr_notify()
3427 *
3428 * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied
3429 * callback (from ipa_connect)
3430 *
3431 * Returns: 0 on success, negative on failure
3432 */
3433int ipa2_tx_dp_mul(enum ipa_client_type src,
3434 struct ipa_tx_data_desc *data_desc)
3435{
3436 /* The second byte in wlan header holds qmap id */
3437#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
3438 struct ipa_tx_data_desc *entry;
3439 struct ipa_sys_context *sys;
3440 struct ipa_desc desc = { 0 };
3441 u32 num_desc, cnt;
3442 int ep_idx;
3443
3444 if (unlikely(!ipa_ctx)) {
3445 IPAERR("IPA driver was not initialized\n");
3446 return -EINVAL;
3447 }
3448
3449 IPADBG("Received data desc anchor:%p\n", data_desc);
3450
3451 spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3452
3453 ep_idx = ipa2_get_ep_mapping(src);
3454 if (unlikely(ep_idx == -1)) {
3455 IPAERR("dest EP does not exist.\n");
3456 goto fail_send;
3457 }
3458 IPADBG("ep idx:%d\n", ep_idx);
3459 sys = ipa_ctx->ep[ep_idx].sys;
3460
3461 if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
3462 IPAERR("dest EP not valid.\n");
3463 goto fail_send;
3464 }
3465 sys->ep->wstats.rx_hd_rcvd++;
3466
3467 /* Calculate the number of descriptors */
3468 num_desc = 0;
3469 list_for_each_entry(entry, &data_desc->link, link) {
3470 num_desc++;
3471 }
3472 IPADBG("Number of Data Descriptors:%d", num_desc);
3473
3474 if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
3475 IPAERR("Insufficient data descriptors available\n");
3476 goto fail_send;
3477 }
3478
3479 /* Assign callback only for last data descriptor */
3480 cnt = 0;
3481 list_for_each_entry(entry, &data_desc->link, link) {
3482 IPADBG("Parsing data desc :%d\n", cnt);
3483 cnt++;
3484 ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
3485 (u8)sys->ep->cfg.meta.qmap_id;
3486 desc.pyld = entry->pyld_buffer;
3487 desc.len = entry->pyld_len;
3488 desc.type = IPA_DATA_DESC_SKB;
3489 desc.user1 = data_desc;
3490 desc.user2 = ep_idx;
3491 IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
3492 entry->priv, desc.pyld, desc.len);
3493
3494 /* In case of last descriptor populate callback */
3495 if (cnt == num_desc) {
3496 IPADBG("data desc:%p\n", data_desc);
3497 desc.callback = ipa_tx_client_rx_notify_release;
3498 } else {
3499 desc.callback = ipa_tx_client_rx_pkt_status;
3500 }
3501
3502 IPADBG("calling ipa_send_one()\n");
3503 if (ipa_send_one(sys, &desc, true)) {
3504 IPAERR("fail to send skb\n");
3505 sys->ep->wstats.rx_pkt_leak += (cnt-1);
3506 sys->ep->wstats.rx_dp_fail++;
3507 goto fail_send;
3508 }
3509
3510 if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
3511 atomic_dec(&sys->ep->avail_fifo_desc);
3512
3513 sys->ep->wstats.rx_pkts_rcvd++;
3514 IPADBG("ep=%d fifo desc=%d\n",
3515 ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
3516 }
3517
3518 sys->ep->wstats.rx_hd_processed++;
3519 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3520 return 0;
3521
3522fail_send:
3523 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3524 return -EFAULT;
3525
3526}
3527
3528void ipa2_free_skb(struct ipa_rx_data *data)
3529{
3530 struct ipa_rx_pkt_wrapper *rx_pkt;
3531
3532 if (unlikely(!ipa_ctx)) {
3533 IPAERR("IPA driver was not initialized\n");
3534 return;
3535 }
3536
3537 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3538
3539 ipa_ctx->wc_memb.total_tx_pkts_freed++;
3540 rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data);
3541
3542 ipa_skb_recycle(rx_pkt->data.skb);
3543 (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
3544
3545 list_add_tail(&rx_pkt->link,
3546 &ipa_ctx->wc_memb.wlan_comm_desc_list);
3547 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
3548
3549 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3550}
3551
3552
3553/* Functions added to support kernel tests */
3554
3555int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
3556 unsigned long *ipa_bam_hdl,
3557 u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
3558{
3559 struct ipa_ep_context *ep;
3560 int ipa_ep_idx;
3561 int result = -EINVAL;
3562
3563 if (sys_in == NULL || clnt_hdl == NULL) {
3564 IPAERR("NULL args\n");
3565 goto fail_gen;
3566 }
3567
3568 if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) {
3569 IPAERR("NULL args\n");
3570 goto fail_gen;
3571 }
3572 if (sys_in->client >= IPA_CLIENT_MAX) {
3573 IPAERR("bad parm client:%d\n", sys_in->client);
3574 goto fail_gen;
3575 }
3576
3577 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
3578 if (ipa_ep_idx == -1) {
3579 IPAERR("Invalid client :%d\n", sys_in->client);
3580 goto fail_gen;
3581 }
3582
3583 ep = &ipa_ctx->ep[ipa_ep_idx];
3584
3585 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
3586
3587 if (ep->valid == 1) {
3588 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
3589 IPAERR("EP %d already allocated\n", ipa_ep_idx);
3590 goto fail_and_disable_clocks;
3591 } else {
3592 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
3593 &sys_in->ipa_ep_cfg.hdr)) {
3594 IPAERR("fail to configure hdr prop of EP %d\n",
3595 ipa_ep_idx);
3596 result = -EFAULT;
3597 goto fail_and_disable_clocks;
3598 }
3599 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
3600 &sys_in->ipa_ep_cfg.cfg)) {
3601 IPAERR("fail to configure cfg prop of EP %d\n",
3602 ipa_ep_idx);
3603 result = -EFAULT;
3604 goto fail_and_disable_clocks;
3605 }
3606 IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
3607 sys_in->client, ipa_ep_idx, ep->sys);
3608 ep->client_notify = sys_in->notify;
3609 ep->priv = sys_in->priv;
3610 *clnt_hdl = ipa_ep_idx;
3611 if (!ep->keep_ipa_awake)
3612 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3613
3614 return 0;
3615 }
3616 }
3617
3618 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
3619
3620 ep->valid = 1;
3621 ep->client = sys_in->client;
3622 ep->client_notify = sys_in->notify;
3623 ep->priv = sys_in->priv;
3624 ep->keep_ipa_awake = true;
3625
3626 result = ipa_enable_data_path(ipa_ep_idx);
3627 if (result) {
3628 IPAERR("enable data path failed res=%d clnt=%d.\n",
3629 result, ipa_ep_idx);
3630 goto fail_gen2;
3631 }
3632
3633 if (!ep->skip_ep_cfg) {
3634 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
3635 IPAERR("fail to configure EP.\n");
3636 goto fail_gen2;
3637 }
3638 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
3639 IPAERR("fail to configure status of EP.\n");
3640 goto fail_gen2;
3641 }
3642 IPADBG("ep configuration successful\n");
3643 } else {
3644 IPADBG("skipping ep configuration\n");
3645 }
3646
3647 *clnt_hdl = ipa_ep_idx;
3648
3649 *ipa_pipe_num = ipa_ep_idx;
3650 *ipa_bam_hdl = ipa_ctx->bam_handle;
3651
3652 if (!ep->keep_ipa_awake)
3653 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3654
3655 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
3656 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
3657 ipa_ep_idx, ep->sys);
3658
3659 return 0;
3660
3661fail_gen2:
3662fail_and_disable_clocks:
3663 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3664fail_gen:
3665 return result;
3666}
3667
3668int ipa2_sys_teardown(u32 clnt_hdl)
3669{
3670 struct ipa_ep_context *ep;
3671
3672 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3673 ipa_ctx->ep[clnt_hdl].valid == 0) {
3674 IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
3675 return -EINVAL;
3676 }
3677
3678 ep = &ipa_ctx->ep[clnt_hdl];
3679
3680 if (!ep->keep_ipa_awake)
3681 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3682
3683 ipa_disable_data_path(clnt_hdl);
3684 ep->valid = 0;
3685
3686 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3687
3688 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
3689
3690 return 0;
3691}
3692
3693int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
3694 unsigned long gsi_ev_hdl)
3695{
3696 IPAERR("GSI not supported in IPAv2");
3697 return -EFAULT;
3698}
3699
3700
3701/**
3702 * ipa_adjust_ra_buff_base_sz()
3703 *
3704 * Return value: the largest power of two which is smaller
3705 * than the input value
3706 */
3707static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
3708{
3709 aggr_byte_limit += IPA_MTU;
3710 aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
3711 aggr_byte_limit--;
3712 aggr_byte_limit |= aggr_byte_limit >> 1;
3713 aggr_byte_limit |= aggr_byte_limit >> 2;
3714 aggr_byte_limit |= aggr_byte_limit >> 4;
3715 aggr_byte_limit |= aggr_byte_limit >> 8;
3716 aggr_byte_limit |= aggr_byte_limit >> 16;
3717 aggr_byte_limit++;
3718 return aggr_byte_limit >> 1;
3719}