/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/delay.h>
14#include <linux/device.h>
15#include <linux/dmapool.h>
16#include <linux/list.h>
17#include <linux/netdevice.h>
18#include "ipa_i.h"
19#include "ipa_trace.h"
20
21#define IPA_LAST_DESC_CNT 0xFFFF
22#define POLLING_INACTIVITY_RX 40
23#define POLLING_INACTIVITY_TX 40
24#define POLLING_MIN_SLEEP_TX 400
25#define POLLING_MAX_SLEEP_TX 500
26/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
27#define IPA_MTU 1500
28#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
29#define IPA_GENERIC_AGGR_TIME_LIMIT 1
30#define IPA_GENERIC_AGGR_PKT_LIMIT 0
31
32#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
33#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
34 (X) + NET_SKB_PAD) +\
35 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
36#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
37 (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
38#define IPA_GENERIC_RX_BUFF_LIMIT (\
39 IPA_REAL_GENERIC_RX_BUFF_SZ(\
40 IPA_GENERIC_RX_BUFF_BASE_SZ) -\
41 IPA_GENERIC_RX_BUFF_BASE_SZ)
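/*
 * A worked example of the sizing math above (illustrative; the concrete
 * numbers assume NET_SKB_PAD == 64, SMP_CACHE_BYTES == 64 and
 * sizeof(struct skb_shared_info) == 320, which are typical but
 * configuration-dependent values):
 *
 *	IPA_REAL_GENERIC_RX_BUFF_SZ(8192)
 *		= SKB_DATA_ALIGN(8192 + 64) + SKB_DATA_ALIGN(320)
 *		= 8256 + 320 = 8576		(384 bytes of skb overhead)
 *
 *	IPA_GENERIC_RX_BUFF_SZ(8192) = 8192 - 384 = 7808
 *
 * i.e. the payload size handed to the HW is shrunk so that payload plus skb
 * overhead still fits the 8K base allocation:
 *	SKB_DATA_ALIGN(7808 + 64) + SKB_DATA_ALIGN(320) = 7872 + 320 = 8192
 */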
42
43#define IPA_RX_BUFF_CLIENT_HEADROOM 256
44
45/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
46#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
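/*
 * Illustrative arithmetic for the macro above: with the 8K base buffer,
 * IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000 = 6 (integer
 * division), which matches the IPA_GENERIC_AGGR_BYTE_LIMIT value of 6
 * defined above.
 */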
47
48#define IPA_WLAN_RX_POOL_SZ 100
49#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
50#define IPA_WLAN_RX_BUFF_SZ 2048
51#define IPA_WLAN_COMM_RX_POOL_LOW 100
52#define IPA_WLAN_COMM_RX_POOL_HIGH 900
53
54#define IPA_ODU_RX_BUFF_SZ 2048
55#define IPA_ODU_RX_POOL_SZ 32
56#define IPA_SIZE_DL_CSUM_META_TRAILER 8
57
58#define IPA_HEADROOM 128
59
60static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags);
61static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys);
62static void ipa_replenish_rx_cache(struct ipa_sys_context *sys);
63static void replenish_rx_work_func(struct work_struct *work);
64static void ipa_wq_handle_rx(struct work_struct *work);
65static void ipa_wq_handle_tx(struct work_struct *work);
66static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size);
67static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys,
68 u32 size);
69static int ipa_assign_policy(struct ipa_sys_connect_params *in,
70 struct ipa_sys_context *sys);
71static void ipa_cleanup_rx(struct ipa_sys_context *sys);
72static void ipa_wq_rx_avail(struct work_struct *work);
73static void ipa_alloc_wlan_rx_common_cache(u32 size);
74static void ipa_cleanup_wlan_rx_common_cache(void);
75static void ipa_wq_repl_rx(struct work_struct *work);
76static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
77 struct sps_iovec *iovec);
78
79static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
80static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
81
82static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
83{
84 struct ipa_tx_pkt_wrapper *tx_pkt_expected;
85 int i;
86
87 for (i = 0; i < cnt; i++) {
88 spin_lock_bh(&sys->spinlock);
89 if (unlikely(list_empty(&sys->head_desc_list))) {
90 spin_unlock_bh(&sys->spinlock);
91 return;
92 }
93 tx_pkt_expected = list_first_entry(&sys->head_desc_list,
94 struct ipa_tx_pkt_wrapper,
95 link);
96 list_del(&tx_pkt_expected->link);
97 sys->len--;
98 spin_unlock_bh(&sys->spinlock);
99 if (!tx_pkt_expected->no_unmap_dma) {
100 if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
101 dma_unmap_single(ipa_ctx->pdev,
102 tx_pkt_expected->mem.phys_base,
103 tx_pkt_expected->mem.size,
104 DMA_TO_DEVICE);
105 } else {
106 dma_unmap_page(ipa_ctx->pdev,
107 tx_pkt_expected->mem.phys_base,
108 tx_pkt_expected->mem.size,
109 DMA_TO_DEVICE);
110 }
111 }
112 if (tx_pkt_expected->callback)
113 tx_pkt_expected->callback(tx_pkt_expected->user1,
114 tx_pkt_expected->user2);
115 if (tx_pkt_expected->cnt > 1 &&
116 tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) {
117 if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) {
118 dma_pool_free(ipa_ctx->dma_pool,
119 tx_pkt_expected->mult.base,
120 tx_pkt_expected->mult.phys_base);
121 } else {
122 dma_unmap_single(ipa_ctx->pdev,
123 tx_pkt_expected->mult.phys_base,
124 tx_pkt_expected->mult.size,
125 DMA_TO_DEVICE);
126 kfree(tx_pkt_expected->mult.base);
127 }
128 }
129 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected);
130 }
131}
132
133static void ipa_wq_write_done_status(int src_pipe)
134{
135 struct ipa_tx_pkt_wrapper *tx_pkt_expected;
136 struct ipa_sys_context *sys;
137 u32 cnt;
138
139 WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes);
140
141 if (!ipa_ctx->ep[src_pipe].status.status_en)
142 return;
143
144 sys = ipa_ctx->ep[src_pipe].sys;
145 if (!sys)
146 return;
147
148 spin_lock_bh(&sys->spinlock);
149 if (unlikely(list_empty(&sys->head_desc_list))) {
150 spin_unlock_bh(&sys->spinlock);
151 return;
152 }
153 tx_pkt_expected = list_first_entry(&sys->head_desc_list,
154 struct ipa_tx_pkt_wrapper,
155 link);
156 cnt = tx_pkt_expected->cnt;
157 spin_unlock_bh(&sys->spinlock);
158 ipa_wq_write_done_common(sys, cnt);
159}
160
/**
 * ipa_wq_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work: work_struct used by the work queue
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that the order of the sent
 *   packets is the same as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 * - return the tx buffer back to dma_pool
 */
174static void ipa_wq_write_done(struct work_struct *work)
175{
176 struct ipa_tx_pkt_wrapper *tx_pkt;
177 u32 cnt;
178 struct ipa_sys_context *sys;
179
180 tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
181 cnt = tx_pkt->cnt;
182 sys = tx_pkt->sys;
183
184 ipa_wq_write_done_common(sys, cnt);
185}
186
187static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
188 bool in_poll_state)
189{
190 struct sps_iovec iov;
191 int ret;
192 int cnt = 0;
193
194 while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
195 !atomic_read(&sys->curr_polling_state))) {
196 if (cnt && !process_all)
197 break;
198 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
199 if (ret) {
200 IPAERR("sps_get_iovec failed %d\n", ret);
201 break;
202 }
203
204 if (iov.addr == 0)
205 break;
206
207 ipa_wq_write_done_common(sys, 1);
208 cnt++;
209 };
210
211 return cnt;
212}
213
214/**
215 * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
216 */
217static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
218{
219 int ret;
220
221 if (!atomic_read(&sys->curr_polling_state)) {
222 IPAERR("already in intr mode\n");
223 goto fail;
224 }
225
226 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
227 if (ret) {
228 IPAERR("sps_get_config() failed %d\n", ret);
229 goto fail;
230 }
231 sys->event.options = SPS_O_EOT;
232 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
233 if (ret) {
234 IPAERR("sps_register_event() failed %d\n", ret);
235 goto fail;
236 }
237 sys->ep->connect.options =
238 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
239 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
240 if (ret) {
241 IPAERR("sps_set_config() failed %d\n", ret);
242 goto fail;
243 }
244 atomic_set(&sys->curr_polling_state, 0);
245 ipa_handle_tx_core(sys, true, false);
246 return;
247
248fail:
249 queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
250 msecs_to_jiffies(1));
251}
252
253static void ipa_handle_tx(struct ipa_sys_context *sys)
254{
255 int inactive_cycles = 0;
256 int cnt;
257
258 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
259 do {
260 cnt = ipa_handle_tx_core(sys, true, true);
261 if (cnt == 0) {
262 inactive_cycles++;
263 usleep_range(POLLING_MIN_SLEEP_TX,
264 POLLING_MAX_SLEEP_TX);
265 } else {
266 inactive_cycles = 0;
267 }
268 } while (inactive_cycles <= POLLING_INACTIVITY_TX);
269
270 ipa_tx_switch_to_intr_mode(sys);
271 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
272}
273
274static void ipa_wq_handle_tx(struct work_struct *work)
275{
276 struct ipa_sys_context *sys;
277
278 sys = container_of(work, struct ipa_sys_context, work);
279
280 ipa_handle_tx(sys);
281}
282
/**
 * ipa_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether the caller is in atomic context
 *
 * - Allocate a tx_pkt wrapper
 * - transfer the data to the IPA
 * - once the transfer is done, SPS notifies the sender via the registered
 *   completion callback (handled by ipa_wq_write_done())
 *
 * Return codes: 0: success, -EFAULT: failure
 */
296int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
297 bool in_atomic)
298{
299 struct ipa_tx_pkt_wrapper *tx_pkt;
300 int result;
301 u16 sps_flags = SPS_IOVEC_FLAG_EOT;
302 dma_addr_t dma_address;
303 u16 len;
304 u32 mem_flag = GFP_ATOMIC;
305 struct sps_iovec iov;
306 int ret;
307
308 if (unlikely(!in_atomic))
309 mem_flag = GFP_KERNEL;
310
311 tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
312 if (!tx_pkt) {
313 IPAERR("failed to alloc tx wrapper\n");
314 goto fail_mem_alloc;
315 }
316
317 if (!desc->dma_address_valid) {
318 dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
319 desc->len, DMA_TO_DEVICE);
320 } else {
321 dma_address = desc->dma_address;
322 tx_pkt->no_unmap_dma = true;
323 }
324 if (!dma_address) {
325 IPAERR("failed to DMA wrap\n");
326 goto fail_dma_map;
327 }
328
329 INIT_LIST_HEAD(&tx_pkt->link);
330 tx_pkt->type = desc->type;
331 tx_pkt->cnt = 1; /* only 1 desc in this "set" */
332
333 tx_pkt->mem.phys_base = dma_address;
334 tx_pkt->mem.base = desc->pyld;
335 tx_pkt->mem.size = desc->len;
336 tx_pkt->sys = sys;
337 tx_pkt->callback = desc->callback;
338 tx_pkt->user1 = desc->user1;
339 tx_pkt->user2 = desc->user2;
340
341 /*
342 * Special treatment for immediate commands, where the structure of the
343 * descriptor is different
344 */
345 if (desc->type == IPA_IMM_CMD_DESC) {
346 sps_flags |= SPS_IOVEC_FLAG_IMME;
347 len = desc->opcode;
348 IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
349 desc->opcode, desc->len, sps_flags);
350 IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
351 } else {
352 len = desc->len;
353 }
354
355 INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
356
357 spin_lock_bh(&sys->spinlock);
358 list_add_tail(&tx_pkt->link, &sys->head_desc_list);
359 if (sys->policy == IPA_POLICY_NOINTR_MODE) {
360 do {
361 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
362 if (ret) {
363 IPADBG("sps_get_iovec failed %d\n", ret);
364 break;
365 }
366 if ((iov.addr == 0x0) && (iov.size == 0x0))
367 break;
368 } while (1);
369 }
370 result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
371 sps_flags);
372 if (result) {
373 IPAERR("sps_transfer_one failed rc=%d\n", result);
374 goto fail_sps_send;
375 }
376
377 spin_unlock_bh(&sys->spinlock);
378
379 return 0;
380
381fail_sps_send:
382 list_del(&tx_pkt->link);
383 spin_unlock_bh(&sys->spinlock);
384 dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
385fail_dma_map:
386 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
387fail_mem_alloc:
388 return -EFAULT;
389}
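/*
 * Illustrative usage sketch for ipa_send_one() (a sketch, not compiled code;
 * sys is the target system pipe context and identifiers prefixed "example_"
 * are hypothetical). The caller fills one struct ipa_desc and lets the
 * supplied callback release the payload once the transfer completes:
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.pyld = example_buf;
 *	desc.len = example_len;
 *	desc.type = IPA_DATA_DESC_SKB;
 *	desc.callback = example_tx_done;	(frees example_buf)
 *	desc.user1 = example_buf;
 *	if (ipa_send_one(sys, &desc, true))	(true: caller is atomic)
 *		the caller still owns example_buf and must free it
 */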
390
/**
 * ipa_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether the caller is in atomic context
 *
 * This function is used for system-to-BAM connections.
 * - the SPS driver expects a struct sps_transfer that holds all the data
 *   for the transaction
 * - an ipa_tx_pkt_wrapper is allocated from the wrapper cache for each IPA
 *   descriptor
 * - the wrapper struct is configured for each ipa-desc payload and holds
 *   information that is later used by the user callbacks
 * - the transfer is submitted by calling sps_transfer()
 * - each packet (command or data) that is sent is also saved in the
 *   ipa_sys_context so that it can later be verified that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
411int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
412 bool in_atomic)
413{
414 struct ipa_tx_pkt_wrapper *tx_pkt;
415 struct ipa_tx_pkt_wrapper *next_pkt;
416 struct sps_transfer transfer = { 0 };
417 struct sps_iovec *iovec;
418 dma_addr_t dma_addr;
419 int i = 0;
420 int j;
421 int result;
422 int fail_dma_wrap = 0;
423 uint size = num_desc * sizeof(struct sps_iovec);
424 u32 mem_flag = GFP_ATOMIC;
425 struct sps_iovec iov;
426 int ret;
427
428 if (unlikely(!in_atomic))
429 mem_flag = GFP_KERNEL;
430
431 if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
432 transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
433 &dma_addr);
434 if (!transfer.iovec) {
435 IPAERR("fail to alloc dma mem for sps xfr buff\n");
436 return -EFAULT;
437 }
438 } else {
439 transfer.iovec = kmalloc(size, mem_flag);
440 if (!transfer.iovec) {
441 IPAERR("fail to alloc mem for sps xfr buff ");
442 IPAERR("num_desc = %d size = %d\n", num_desc, size);
443 return -EFAULT;
444 }
445 dma_addr = dma_map_single(ipa_ctx->pdev,
446 transfer.iovec, size, DMA_TO_DEVICE);
447 if (!dma_addr) {
448 IPAERR("dma_map_single failed for sps xfr buff\n");
449 kfree(transfer.iovec);
450 return -EFAULT;
451 }
452 }
453
454 transfer.iovec_phys = dma_addr;
455 transfer.iovec_count = num_desc;
456 spin_lock_bh(&sys->spinlock);
457
458 for (i = 0; i < num_desc; i++) {
459 fail_dma_wrap = 0;
460 tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
461 mem_flag);
462 if (!tx_pkt) {
463 IPAERR("failed to alloc tx wrapper\n");
464 goto failure;
465 }
466 /*
467 * first desc of set is "special" as it holds the count and
468 * other info
469 */
470 if (i == 0) {
471 transfer.user = tx_pkt;
472 tx_pkt->mult.phys_base = dma_addr;
473 tx_pkt->mult.base = transfer.iovec;
474 tx_pkt->mult.size = size;
475 tx_pkt->cnt = num_desc;
476 INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
477 }
478
479 iovec = &transfer.iovec[i];
480 iovec->flags = 0;
481
482 INIT_LIST_HEAD(&tx_pkt->link);
483 tx_pkt->type = desc[i].type;
484
485 if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
486 tx_pkt->mem.base = desc[i].pyld;
487 tx_pkt->mem.size = desc[i].len;
488
489 if (!desc[i].dma_address_valid) {
490 tx_pkt->mem.phys_base =
491 dma_map_single(ipa_ctx->pdev,
492 tx_pkt->mem.base,
493 tx_pkt->mem.size,
494 DMA_TO_DEVICE);
495 } else {
496 tx_pkt->mem.phys_base = desc[i].dma_address;
497 tx_pkt->no_unmap_dma = true;
498 }
499 } else {
500 tx_pkt->mem.base = desc[i].frag;
501 tx_pkt->mem.size = desc[i].len;
502
503 if (!desc[i].dma_address_valid) {
504 tx_pkt->mem.phys_base =
505 skb_frag_dma_map(ipa_ctx->pdev,
506 desc[i].frag,
507 0, tx_pkt->mem.size,
508 DMA_TO_DEVICE);
509 } else {
510 tx_pkt->mem.phys_base = desc[i].dma_address;
511 tx_pkt->no_unmap_dma = true;
512 }
513 }
514
515 if (!tx_pkt->mem.phys_base) {
516 IPAERR("failed to alloc tx wrapper\n");
517 fail_dma_wrap = 1;
518 goto failure;
519 }
520
521 tx_pkt->sys = sys;
522 tx_pkt->callback = desc[i].callback;
523 tx_pkt->user1 = desc[i].user1;
524 tx_pkt->user2 = desc[i].user2;
525
526 /*
527 * Point the iovec to the buffer and
528 * add this packet to system pipe context.
529 */
530 iovec->addr = tx_pkt->mem.phys_base;
531 list_add_tail(&tx_pkt->link, &sys->head_desc_list);
532
533 /*
534 * Special treatment for immediate commands, where the structure
535 * of the descriptor is different
536 */
537 if (desc[i].type == IPA_IMM_CMD_DESC) {
538 iovec->size = desc[i].opcode;
539 iovec->flags |= SPS_IOVEC_FLAG_IMME;
540 IPA_DUMP_BUFF(desc[i].pyld,
541 tx_pkt->mem.phys_base, desc[i].len);
542 } else {
543 iovec->size = desc[i].len;
544 }
545
546 if (i == (num_desc - 1)) {
547 iovec->flags |= SPS_IOVEC_FLAG_EOT;
548 /* "mark" the last desc */
549 tx_pkt->cnt = IPA_LAST_DESC_CNT;
550 }
551 }
552
553 if (sys->policy == IPA_POLICY_NOINTR_MODE) {
554 do {
555 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
556 if (ret) {
557 IPADBG("sps_get_iovec failed %d\n", ret);
558 break;
559 }
560 if ((iov.addr == 0x0) && (iov.size == 0x0))
561 break;
562 } while (1);
563 }
564 result = sps_transfer(sys->ep->ep_hdl, &transfer);
565 if (result) {
566 IPAERR("sps_transfer failed rc=%d\n", result);
567 goto failure;
568 }
569
570 spin_unlock_bh(&sys->spinlock);
571 return 0;
572
573failure:
574 tx_pkt = transfer.user;
575 for (j = 0; j < i; j++) {
576 next_pkt = list_next_entry(tx_pkt, link);
577 list_del(&tx_pkt->link);
578 if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
579 dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
580 tx_pkt->mem.size,
581 DMA_TO_DEVICE);
582 } else {
583 dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
584 tx_pkt->mem.size,
585 DMA_TO_DEVICE);
586 }
587 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
588 tx_pkt = next_pkt;
589 }
590 if (j < num_desc)
591 /* last desc failed */
592 if (fail_dma_wrap)
593 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
594 if (transfer.iovec_phys) {
595 if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
596 dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
597 transfer.iovec_phys);
598 } else {
599 dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys,
600 size, DMA_TO_DEVICE);
601 kfree(transfer.iovec);
602 }
603 }
604 spin_unlock_bh(&sys->spinlock);
605 return -EFAULT;
606}
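/*
 * Note on the descriptor-set layout built by ipa_send(): the sps_iovec array
 * submitted to SPS lives in transfer.iovec and its DMA handle is recorded
 * only in the first wrapper of the set (tx_pkt->mult), which keeps
 * cnt == num_desc; the last wrapper is marked with cnt == IPA_LAST_DESC_CNT.
 * ipa_wq_write_done_common() later uses the first wrapper's cnt to decide
 * when and how to release the iovec array (dma_pool_free() for
 * IPA_NUM_DESC_PER_SW_TX sets, dma_unmap_single() + kfree() otherwise).
 */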
607
/**
 * ipa_sps_irq_cmd_ack - callback function which will be called by the SPS
 * driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: not used
 *
 * Complete the immediate command's completion object; this releases the
 * thread which waits on this completion object (ipa_send_cmd())
 */
617static void ipa_sps_irq_cmd_ack(void *user1, int user2)
618{
619 struct ipa_desc *desc = (struct ipa_desc *)user1;
620
621 if (!desc) {
622 IPAERR("desc is NULL\n");
623 WARN_ON(1);
624 return;
625 }
626 IPADBG("got ack for cmd=%d\n", desc->opcode);
627 complete(&desc->xfer_done);
628}
629
/**
 * ipa_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the descr struct
 * @descr: descriptor structure
 *
 * This function blocks until the command is ACKed by the IPA HW; the caller
 * needs to free any resources it allocated after the function returns.
 * The callback in ipa_desc should not be set by the caller
 * for this function.
 */
640int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
641{
642 struct ipa_desc *desc;
643 int result = 0;
644 struct ipa_sys_context *sys;
645 int ep_idx;
646
647 IPADBG("sending command\n");
648
649 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
650 if (-1 == ep_idx) {
651 IPAERR("Client %u is not mapped\n",
652 IPA_CLIENT_APPS_CMD_PROD);
653 return -EFAULT;
654 }
655 sys = ipa_ctx->ep[ep_idx].sys;
656
657 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
658
659 if (num_desc == 1) {
660 init_completion(&descr->xfer_done);
661
662 if (descr->callback || descr->user1)
663 WARN_ON(1);
664
665 descr->callback = ipa_sps_irq_cmd_ack;
666 descr->user1 = descr;
667 if (ipa_send_one(sys, descr, true)) {
668 IPAERR("fail to send immediate command\n");
669 result = -EFAULT;
670 goto bail;
671 }
672 wait_for_completion(&descr->xfer_done);
673 } else {
674 desc = &descr[num_desc - 1];
675 init_completion(&desc->xfer_done);
676
677 if (desc->callback || desc->user1)
678 WARN_ON(1);
679
680 desc->callback = ipa_sps_irq_cmd_ack;
681 desc->user1 = desc;
682 if (ipa_send(sys, num_desc, descr, true)) {
683 IPAERR("fail to send multiple immediate command set\n");
684 result = -EFAULT;
685 goto bail;
686 }
687 wait_for_completion(&desc->xfer_done);
688 }
689
690bail:
691 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
692 return result;
693}
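/*
 * Illustrative usage sketch for ipa_send_cmd() (a sketch, not compiled code;
 * identifiers prefixed "example_" are hypothetical). An immediate command is
 * wrapped in one descriptor and sent synchronously; ipa_send_cmd() returns
 * only after ipa_sps_irq_cmd_ack() completes the descriptor:
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.opcode = IPA_IP_PACKET_INIT;	(any immediate command opcode)
 *	desc.pyld = example_cmd;		(DMA-able command payload)
 *	desc.len = sizeof(*example_cmd);
 *	desc.type = IPA_IMM_CMD_DESC;
 *	if (ipa_send_cmd(1, &desc))
 *		handle the error
 *	kfree(example_cmd);			(caller frees after return)
 */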
694
/**
 * ipa_sps_irq_tx_notify() - Callback function which will be called by
 * the SPS driver to start a Tx poll operation.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 */
703static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
704{
705 struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
706 int ret;
707
708 IPADBG("event %d notified\n", notify->event_id);
709
710 switch (notify->event_id) {
711 case SPS_EVENT_EOT:
712 if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
713 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
714 if (!atomic_read(&sys->curr_polling_state)) {
715 ret = sps_get_config(sys->ep->ep_hdl,
716 &sys->ep->connect);
717 if (ret) {
718 IPAERR("sps_get_config() failed %d\n", ret);
719 break;
720 }
721 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
722 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
723 ret = sps_set_config(sys->ep->ep_hdl,
724 &sys->ep->connect);
725 if (ret) {
726 IPAERR("sps_set_config() failed %d\n", ret);
727 break;
728 }
729 atomic_set(&sys->curr_polling_state, 1);
730 queue_work(sys->wq, &sys->work);
731 }
732 break;
733 default:
734 IPAERR("received unexpected event id %d\n", notify->event_id);
735 }
736}
737
/**
 * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
 * the SPS driver after a Tx operation is complete.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 * The event will later be handled by ipa_wq_write_done().
 */
747static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
748{
749 struct ipa_tx_pkt_wrapper *tx_pkt;
750
751 IPADBG("event %d notified\n", notify->event_id);
752
753 switch (notify->event_id) {
754 case SPS_EVENT_EOT:
755 tx_pkt = notify->data.transfer.user;
756 if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
757 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
758 queue_work(tx_pkt->sys->wq, &tx_pkt->work);
759 break;
760 default:
761 IPAERR("received unexpected event id %d\n", notify->event_id);
762 }
763}
764
/**
 * ipa_poll_pkt() - Poll a packet from the SPS BAM
 *
 * Returns 0 on a successful poll, -EIO otherwise.
 */
771static int ipa_poll_pkt(struct ipa_sys_context *sys,
772 struct sps_iovec *iov)
773{
774 int ret;
775
776 ret = sps_get_iovec(sys->ep->ep_hdl, iov);
777 if (ret) {
778 IPAERR("sps_get_iovec failed %d\n", ret);
779 return ret;
780 }
781
782 if (iov->addr == 0)
783 return -EIO;
784
785 return 0;
786}
787
/**
 * ipa_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb so it is no longer DMA-able
 * - Free the packet wrapper back to the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
802static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
803 bool in_poll_state)
804{
805 struct sps_iovec iov;
806 int ret;
807 int cnt = 0;
808
809 while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
810 !atomic_read(&sys->curr_polling_state))) {
811 if (cnt && !process_all)
812 break;
813
814 ret = ipa_poll_pkt(sys, &iov);
815 if (ret)
816 break;
817
818 if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
819 ipa_dma_memcpy_notify(sys, &iov);
820 else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
821 ipa_wlan_wq_rx_common(sys, iov.size);
822 else
823 ipa_wq_rx_common(sys, iov.size);
824
825 cnt++;
826 };
827
828 return cnt;
829}
830
831/**
832 * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
833 */
834static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
835{
836 int ret;
837
838 if (!sys->ep || !sys->ep->valid) {
839 IPAERR("EP Not Valid, no need to cleanup.\n");
840 return;
841 }
842
843 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
844 if (ret) {
845 IPAERR("sps_get_config() failed %d\n", ret);
846 goto fail;
847 }
848
849 if (!atomic_read(&sys->curr_polling_state) &&
850 ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
851 IPADBG("already in intr mode\n");
852 return;
853 }
854
855 if (!atomic_read(&sys->curr_polling_state)) {
856 IPAERR("already in intr mode\n");
857 goto fail;
858 }
859
860 sys->event.options = SPS_O_EOT;
861 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
862 if (ret) {
863 IPAERR("sps_register_event() failed %d\n", ret);
864 goto fail;
865 }
866 sys->ep->connect.options =
867 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
868 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
869 if (ret) {
870 IPAERR("sps_set_config() failed %d\n", ret);
871 goto fail;
872 }
873 atomic_set(&sys->curr_polling_state, 0);
874 if (!sys->ep->napi_enabled)
875 ipa_handle_rx_core(sys, true, false);
876 ipa_dec_release_wakelock(sys->ep->wakelock_client);
877 return;
878
879fail:
880 queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
881 msecs_to_jiffies(1));
882}
883
884
885/**
886 * ipa_sps_irq_control() - Function to enable or disable BAM IRQ.
887 */
888static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
889{
890 int ret;
891
892 /*
893 * Do not change sps config in case we are in polling mode as this
894 * indicates that sps driver already notified EOT event and sps config
895 * should not change until ipa driver processes the packet.
896 */
897 if (atomic_read(&sys->curr_polling_state)) {
898 IPADBG("in polling mode, do not change config\n");
899 return;
900 }
901
902 if (enable) {
903 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
904 if (ret) {
905 IPAERR("sps_get_config() failed %d\n", ret);
906 return;
907 }
908 sys->event.options = SPS_O_EOT;
909 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
910 if (ret) {
911 IPAERR("sps_register_event() failed %d\n", ret);
912 return;
913 }
914 sys->ep->connect.options =
915 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
916 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
917 if (ret) {
918 IPAERR("sps_set_config() failed %d\n", ret);
919 return;
920 }
921 } else {
922 ret = sps_get_config(sys->ep->ep_hdl,
923 &sys->ep->connect);
924 if (ret) {
925 IPAERR("sps_get_config() failed %d\n", ret);
926 return;
927 }
928 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
929 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
930 ret = sps_set_config(sys->ep->ep_hdl,
931 &sys->ep->connect);
932 if (ret) {
933 IPAERR("sps_set_config() failed %d\n", ret);
934 return;
935 }
936 }
937}
938
939void ipa_sps_irq_control_all(bool enable)
940{
941 struct ipa_ep_context *ep;
942 int ipa_ep_idx, client_num;
943
944 IPADBG("\n");
945
946 for (client_num = IPA_CLIENT_CONS;
947 client_num < IPA_CLIENT_MAX; client_num++) {
948 if (!IPA_CLIENT_IS_APPS_CONS(client_num))
949 continue;
950
951 ipa_ep_idx = ipa_get_ep_mapping(client_num);
952 if (ipa_ep_idx == -1) {
953 IPAERR("Invalid client.\n");
954 continue;
955 }
956 ep = &ipa_ctx->ep[ipa_ep_idx];
957 if (!ep->valid) {
958 IPAERR("EP (%d) not allocated.\n", ipa_ep_idx);
959 continue;
960 }
961 ipa_sps_irq_control(ep->sys, enable);
962 }
963}
964
/**
 * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
 * driver when a packet is received
 * @notify: SPS driver supplied notification information
 *
 * Called in an interrupt context, therefore the majority of the work is
 * deferred using a work queue.
 *
 * After receiving a packet, the driver goes to polling mode and keeps pulling
 * packets until the rx buffer is empty, then it goes back to interrupt mode.
 * This prevents the CPU from handling too many interrupts when the
 * throughput is high.
 */
978static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
979{
980 struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
981 int ret;
982
983 IPADBG("event %d notified\n", notify->event_id);
984
985 switch (notify->event_id) {
986 case SPS_EVENT_EOT:
987 if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
988 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
989
990 if (atomic_read(&sys->curr_polling_state)) {
991 sys->ep->eot_in_poll_err++;
992 break;
993 }
994
995 ret = sps_get_config(sys->ep->ep_hdl,
996 &sys->ep->connect);
997 if (ret) {
998 IPAERR("sps_get_config() failed %d\n", ret);
999 break;
1000 }
1001 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
1002 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
1003 ret = sps_set_config(sys->ep->ep_hdl,
1004 &sys->ep->connect);
1005 if (ret) {
1006 IPAERR("sps_set_config() failed %d\n", ret);
1007 break;
1008 }
1009 ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
1010 atomic_set(&sys->curr_polling_state, 1);
1011 trace_intr_to_poll(sys->ep->client);
1012 queue_work(sys->wq, &sys->work);
1013 break;
1014 default:
1015 IPAERR("received unexpected event id %d\n", notify->event_id);
1016 }
1017}
1018
1019static void switch_to_intr_tx_work_func(struct work_struct *work)
1020{
1021 struct delayed_work *dwork;
1022 struct ipa_sys_context *sys;
1023
1024 dwork = container_of(work, struct delayed_work, work);
1025 sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
1026 ipa_handle_tx(sys);
1027}
1028
/**
 * ipa_handle_rx() - handle packet reception. This function is executed in the
 * context of a work queue.
 * @sys: system pipe context
 *
 * ipa_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
1037static void ipa_handle_rx(struct ipa_sys_context *sys)
1038{
1039 int inactive_cycles = 0;
1040 int cnt;
1041
1042 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1043 do {
1044 cnt = ipa_handle_rx_core(sys, true, true);
1045 if (cnt == 0) {
1046 inactive_cycles++;
1047 trace_idle_sleep_enter(sys->ep->client);
1048 usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
1049 ipa_ctx->ipa_rx_max_timeout_usec);
1050 trace_idle_sleep_exit(sys->ep->client);
1051 } else {
1052 inactive_cycles = 0;
1053 }
1054
1055 /* if pipe is out of buffers there is no point polling for
1056 * completed descs; release the worker so delayed work can
1057 * run in a timely manner
1058 */
1059 if (sys->len == 0)
1060 break;
1061
1062 } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);
1063
1064 trace_poll_to_intr(sys->ep->client);
1065 ipa_rx_switch_to_intr_mode(sys);
1066 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1067}
1068
/**
 * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
 * function is executed in the softirq context
 *
 * If no packets are polled, the driver schedules a switch back to
 * interrupt mode.
 *
 * Returns the number of polled packets; 0 on error.
 */
1078int ipa2_rx_poll(u32 clnt_hdl, int weight)
1079{
1080 struct ipa_ep_context *ep;
1081 int ret;
1082 int cnt = 0;
1083 unsigned int delay = 1;
1084 struct sps_iovec iov;
1085
1086 IPADBG("\n");
1087 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
1088 ipa_ctx->ep[clnt_hdl].valid == 0) {
1089 IPAERR("bad parm 0x%x\n", clnt_hdl);
1090 return cnt;
1091 }
1092
1093 ep = &ipa_ctx->ep[clnt_hdl];
1094 while (cnt < weight &&
1095 atomic_read(&ep->sys->curr_polling_state)) {
1096
1097 ret = ipa_poll_pkt(ep->sys, &iov);
1098 if (ret)
1099 break;
1100
1101 ipa_wq_rx_common(ep->sys, iov.size);
1102 cnt += 5;
1103 };
1104
1105 if (cnt == 0) {
1106 ep->inactive_cycles++;
1107 ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
1108
1109 if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
1110 ep->switch_to_intr = true;
1111 delay = 0;
1112 }
1113 queue_delayed_work(ep->sys->wq,
1114 &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
1115 } else
1116 ep->inactive_cycles = 0;
1117
1118 return cnt;
1119}
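/*
 * Illustrative sketch of the NAPI interaction implied by the code above (an
 * assumption about the client side, not a verbatim copy of any specific
 * client): the client's napi poll handler forwards its budget to
 * ipa2_rx_poll() and returns the number of packets processed, e.g.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		return ipa2_rx_poll(example_clnt_hdl, budget);
 *	}
 *
 * while the IPA driver itself tells the client when to stop
 * (IPA_CLIENT_COMP_NAPI, after which the client is expected to complete
 * NAPI) and when to start polling again (IPA_CLIENT_START_POLL).
 */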
1120
1121static void switch_to_intr_rx_work_func(struct work_struct *work)
1122{
1123 struct delayed_work *dwork;
1124 struct ipa_sys_context *sys;
1125
1126 dwork = container_of(work, struct delayed_work, work);
1127 sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
1128
1129 if (sys->ep->napi_enabled) {
1130 if (sys->ep->switch_to_intr) {
1131 ipa_rx_switch_to_intr_mode(sys);
1132 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
1133 sys->ep->switch_to_intr = false;
1134 sys->ep->inactive_cycles = 0;
1135 } else
1136 sys->ep->client_notify(sys->ep->priv,
1137 IPA_CLIENT_START_POLL, 0);
1138 } else
1139 ipa_handle_rx(sys);
1140}
1141
/**
 * ipa_update_repl_threshold() - Update the repl_threshold for the client.
 * @ipa_client: IPA client type whose endpoint threshold should be updated
 *
 * Return value: None.
 */
1147void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
1148{
1149 int ep_idx;
1150 struct ipa_ep_context *ep;
1151
1152 /* Check if ep is valid. */
1153 ep_idx = ipa2_get_ep_mapping(ipa_client);
1154 if (ep_idx == -1) {
1155 IPADBG("Invalid IPA client\n");
1156 return;
1157 }
1158
1159 ep = &ipa_ctx->ep[ep_idx];
1160 if (!ep->valid) {
1161 IPADBG("EP not valid/Not applicable for client.\n");
1162 return;
1163 }
1164 /*
1165 * Determine how many buffers/descriptors remaining will
1166 * cause to drop below the yellow WM bar.
1167 */
1168 ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
1169 / ep->sys->rx_buff_sz;
1170}
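/*
 * Worked example of the computation above (the numbers are illustrative
 * only; the yellow watermark comes from ipa_get_sys_yellow_wm() and is
 * HW/configuration dependent): a yellow WM of 32768 bytes with an
 * rx_buff_sz of 2048 bytes gives rx_replenish_threshold = 32768 / 2048 = 16,
 * i.e. the watermark expressed in whole Rx buffers.
 */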
1171
/**
 * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
 * IPA EP configuration
 * @sys_in: [in] input needed to setup BAM pipe and configure EP
 * @clnt_hdl: [out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - call SPS APIs to create a system-to-bam connection with IPA.
 * - allocate descriptor FIFO
 * - register callback function (ipa_sps_irq_rx_notify or
 *   ipa_sps_irq_tx_notify - depends on client type) in case the driver is
 *   not configured to polling mode
 *
 * Returns: 0 on success, negative on failure
 */
1188int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
1189{
1190 struct ipa_ep_context *ep;
1191 int ipa_ep_idx;
1192 int result = -EINVAL;
1193 dma_addr_t dma_addr;
1194 char buff[IPA_RESOURCE_NAME_MAX];
1195 struct iommu_domain *smmu_domain;
1196
1197 if (unlikely(!ipa_ctx)) {
1198 IPAERR("IPA driver was not initialized\n");
1199 return -EINVAL;
1200 }
1201
1202 if (sys_in == NULL || clnt_hdl == NULL) {
1203 IPAERR("NULL args\n");
1204 goto fail_gen;
1205 }
1206
1207 if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
1208 IPAERR("bad parm client:%d fifo_sz:%d\n",
1209 sys_in->client, sys_in->desc_fifo_sz);
1210 goto fail_gen;
1211 }
1212
1213 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
1214 if (ipa_ep_idx == -1) {
1215 IPAERR("Invalid client.\n");
1216 goto fail_gen;
1217 }
1218
1219 ep = &ipa_ctx->ep[ipa_ep_idx];
1220
1221 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
1222
1223 if (ep->valid == 1) {
1224 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
1225 IPAERR("EP already allocated.\n");
1226 goto fail_and_disable_clocks;
1227 } else {
1228 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
1229 &sys_in->ipa_ep_cfg.hdr)) {
1230 IPAERR("fail to configure hdr prop of EP.\n");
1231 result = -EFAULT;
1232 goto fail_and_disable_clocks;
1233 }
1234 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
1235 &sys_in->ipa_ep_cfg.cfg)) {
1236 IPAERR("fail to configure cfg prop of EP.\n");
1237 result = -EFAULT;
1238 goto fail_and_disable_clocks;
1239 }
1240 IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
1241 sys_in->client, ipa_ep_idx, ep->sys);
1242 ep->client_notify = sys_in->notify;
1243 ep->priv = sys_in->priv;
1244 *clnt_hdl = ipa_ep_idx;
1245 if (!ep->keep_ipa_awake)
1246 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1247
1248 return 0;
1249 }
1250 }
1251
1252 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
1253
1254 if (!ep->sys) {
1255 ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL);
1256 if (!ep->sys) {
1257 IPAERR("failed to sys ctx for client %d\n",
1258 sys_in->client);
1259 result = -ENOMEM;
1260 goto fail_and_disable_clocks;
1261 }
1262
1263 ep->sys->ep = ep;
1264 snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
1265 sys_in->client);
1266 ep->sys->wq = alloc_workqueue(buff,
1267 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
1268 if (!ep->sys->wq) {
1269 IPAERR("failed to create wq for client %d\n",
1270 sys_in->client);
1271 result = -EFAULT;
1272 goto fail_wq;
1273 }
1274
1275 snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
1276 sys_in->client);
1277 ep->sys->repl_wq = alloc_workqueue(buff,
1278 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
1279 if (!ep->sys->repl_wq) {
1280 IPAERR("failed to create rep wq for client %d\n",
1281 sys_in->client);
1282 result = -EFAULT;
1283 goto fail_wq2;
1284 }
1285
1286 INIT_LIST_HEAD(&ep->sys->head_desc_list);
1287 INIT_LIST_HEAD(&ep->sys->rcycl_list);
1288 spin_lock_init(&ep->sys->spinlock);
1289 } else {
1290 memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
1291 }
1292
1293 ep->skip_ep_cfg = sys_in->skip_ep_cfg;
1294 if (ipa_assign_policy(sys_in, ep->sys)) {
1295 IPAERR("failed to sys ctx for client %d\n", sys_in->client);
1296 result = -ENOMEM;
1297 goto fail_gen2;
1298 }
1299
1300 ep->valid = 1;
1301 ep->client = sys_in->client;
1302 ep->client_notify = sys_in->notify;
1303 ep->napi_enabled = sys_in->napi_enabled;
1304 ep->priv = sys_in->priv;
1305 ep->keep_ipa_awake = sys_in->keep_ipa_awake;
1306 atomic_set(&ep->avail_fifo_desc,
1307 ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
1308
1309 if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
1310 ep->sys->status_stat == NULL) {
1311 ep->sys->status_stat =
1312 kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL);
1313 if (!ep->sys->status_stat) {
1314 IPAERR("no memory\n");
1315 goto fail_gen2;
1316 }
1317 }
1318
1319 result = ipa_enable_data_path(ipa_ep_idx);
1320 if (result) {
1321 IPAERR("enable data path failed res=%d clnt=%d.\n", result,
1322 ipa_ep_idx);
1323 goto fail_gen2;
1324 }
1325
1326 if (!ep->skip_ep_cfg) {
1327 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
1328 IPAERR("fail to configure EP.\n");
1329 goto fail_gen2;
1330 }
1331 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
1332 IPAERR("fail to configure status of EP.\n");
1333 goto fail_gen2;
1334 }
1335 IPADBG("ep configuration successful\n");
1336 } else {
1337 IPADBG("skipping ep configuration\n");
1338 }
1339
1340 /* Default Config */
1341 ep->ep_hdl = sps_alloc_endpoint();
1342 if (ep->ep_hdl == NULL) {
1343 IPAERR("SPS EP allocation failed.\n");
1344 goto fail_gen2;
1345 }
1346
1347 result = sps_get_config(ep->ep_hdl, &ep->connect);
1348 if (result) {
1349 IPAERR("fail to get config.\n");
1350 goto fail_sps_cfg;
1351 }
1352
1353 /* Specific Config */
1354 if (IPA_CLIENT_IS_CONS(sys_in->client)) {
1355 ep->connect.mode = SPS_MODE_SRC;
1356 ep->connect.destination = SPS_DEV_HANDLE_MEM;
1357 ep->connect.source = ipa_ctx->bam_handle;
1358 ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++;
1359 ep->connect.src_pipe_index = ipa_ep_idx;
1360 /*
1361 * Determine how many buffers/descriptors remaining will
1362 * cause to drop below the yellow WM bar.
1363 */
1364 ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
1365 / ep->sys->rx_buff_sz;
1366 /* Only when the WAN pipes are setup, actual threshold will
1367 * be read from the register. So update LAN_CONS ep again with
1368 * right value.
1369 */
1370 if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS)
1371 ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS);
1372 } else {
1373 ep->connect.mode = SPS_MODE_DEST;
1374 ep->connect.source = SPS_DEV_HANDLE_MEM;
1375 ep->connect.destination = ipa_ctx->bam_handle;
1376 ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++;
1377 ep->connect.dest_pipe_index = ipa_ep_idx;
1378 }
1379
1380 IPADBG("client:%d ep:%d",
1381 sys_in->client, ipa_ep_idx);
1382
1383 IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
1384 ep->connect.dest_pipe_index,
1385 ep->connect.src_pipe_index);
1386
1387 ep->connect.options = ep->sys->sps_option;
1388 ep->connect.desc.size = sys_in->desc_fifo_sz;
1389 ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
1390 ep->connect.desc.size, &dma_addr, GFP_KERNEL);
1391 if (ipa_ctx->smmu_s1_bypass) {
1392 ep->connect.desc.phys_base = dma_addr;
1393 } else {
1394 ep->connect.desc.iova = dma_addr;
1395 smmu_domain = ipa2_get_smmu_domain();
1396 if (smmu_domain != NULL) {
1397 ep->connect.desc.phys_base =
1398 iommu_iova_to_phys(smmu_domain, dma_addr);
1399 }
1400 }
1401 if (ep->connect.desc.base == NULL) {
1402 IPAERR("fail to get DMA desc memory.\n");
1403 goto fail_sps_cfg;
1404 }
1405
1406 ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
1407
1408 result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client);
1409 if (result) {
1410 IPAERR("sps_connect fails.\n");
1411 goto fail_sps_connect;
1412 }
1413
1414 ep->sys->event.options = SPS_O_EOT;
1415 ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
1416 ep->sys->event.xfer_done = NULL;
1417 ep->sys->event.user = ep->sys;
1418 ep->sys->event.callback = ep->sys->sps_callback;
1419 result = sps_register_event(ep->ep_hdl, &ep->sys->event);
1420 if (result < 0) {
1421 IPAERR("register event error %d\n", result);
1422 goto fail_register_event;
1423 }
1424
1425 *clnt_hdl = ipa_ep_idx;
1426
1427 if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
1428 ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
1429 ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
1430 sizeof(void *), GFP_KERNEL);
1431 if (!ep->sys->repl.cache) {
1432 IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
1433 ep->sys->repl_hdlr = ipa_replenish_rx_cache;
1434 ep->sys->repl.capacity = 0;
1435 } else {
1436 atomic_set(&ep->sys->repl.head_idx, 0);
1437 atomic_set(&ep->sys->repl.tail_idx, 0);
1438 ipa_wq_repl_rx(&ep->sys->repl_work);
1439 }
1440 }
1441
1442 if (IPA_CLIENT_IS_CONS(sys_in->client))
1443 ipa_replenish_rx_cache(ep->sys);
1444
1445 if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
1446 ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
1447 atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
1448 }
1449
1450 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
1451 if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
1452 if (ipa_ctx->modem_cfg_emb_pipe_flt &&
1453 sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
1454 IPADBG("modem cfg emb pipe flt\n");
1455 else
1456 ipa_install_dflt_flt_rules(ipa_ep_idx);
1457 }
1458
1459 if (!ep->keep_ipa_awake)
1460 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1461
1462 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
1463 ipa_ep_idx, ep->sys);
1464
1465 return 0;
1466
1467fail_register_event:
1468 sps_disconnect(ep->ep_hdl);
1469fail_sps_connect:
1470 dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
1471 ep->connect.desc.base,
1472 ep->connect.desc.phys_base);
1473fail_sps_cfg:
1474 sps_free_endpoint(ep->ep_hdl);
1475fail_gen2:
1476 destroy_workqueue(ep->sys->repl_wq);
1477fail_wq2:
1478 destroy_workqueue(ep->sys->wq);
1479fail_wq:
1480 kfree(ep->sys);
1481 memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
1482fail_and_disable_clocks:
1483 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1484fail_gen:
1485 return result;
1486}
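/*
 * Illustrative usage sketch for ipa2_setup_sys_pipe() /
 * ipa2_teardown_sys_pipe() (a sketch, not compiled code; identifiers
 * prefixed "example_" are hypothetical):
 *
 *	struct ipa_sys_connect_params in;
 *	u32 hdl;
 *
 *	memset(&in, 0, sizeof(in));
 *	in.client = IPA_CLIENT_APPS_LAN_CONS;
 *	in.desc_fifo_sz = example_fifo_sz;	(descriptor FIFO size, bytes)
 *	in.notify = example_notify_cb;		(client notify callback)
 *	in.priv = example_priv;
 *	if (ipa2_setup_sys_pipe(&in, &hdl))
 *		handle the error
 *	...
 *	ipa2_teardown_sys_pipe(hdl);
 */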
1487
1488/**
1489 * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
1490 * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe
1491 *
1492 * Returns: 0 on success, negative on failure
1493 */
1494int ipa2_teardown_sys_pipe(u32 clnt_hdl)
1495{
1496 struct ipa_ep_context *ep;
1497 int empty;
1498
1499 if (unlikely(!ipa_ctx)) {
1500 IPAERR("IPA driver was not initialized\n");
1501 return -EINVAL;
1502 }
1503
1504 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
1505 ipa_ctx->ep[clnt_hdl].valid == 0) {
1506 IPAERR("bad parm.\n");
1507 return -EINVAL;
1508 }
1509
1510 ep = &ipa_ctx->ep[clnt_hdl];
1511
1512 if (!ep->keep_ipa_awake)
1513 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
1514
1515 ipa_disable_data_path(clnt_hdl);
1516 if (ep->napi_enabled) {
1517 ep->switch_to_intr = true;
1518 do {
1519 usleep_range(95, 105);
1520 } while (atomic_read(&ep->sys->curr_polling_state));
1521 }
1522
1523 if (IPA_CLIENT_IS_PROD(ep->client)) {
1524 do {
1525 spin_lock_bh(&ep->sys->spinlock);
1526 empty = list_empty(&ep->sys->head_desc_list);
1527 spin_unlock_bh(&ep->sys->spinlock);
1528 if (!empty)
1529 usleep_range(95, 105);
1530 else
1531 break;
1532 } while (1);
1533 }
1534
1535 if (IPA_CLIENT_IS_CONS(ep->client)) {
1536 cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
1537 cancel_delayed_work_sync(&ep->sys->switch_to_intr_work);
1538 }
1539
1540 flush_workqueue(ep->sys->wq);
1541 sps_disconnect(ep->ep_hdl);
1542 dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
1543 ep->connect.desc.base,
1544 ep->connect.desc.phys_base);
1545 sps_free_endpoint(ep->ep_hdl);
1546 if (ep->sys->repl_wq)
1547 flush_workqueue(ep->sys->repl_wq);
1548 if (IPA_CLIENT_IS_CONS(ep->client))
1549 ipa_cleanup_rx(ep->sys);
1550
1551 if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
1552 if (ipa_ctx->modem_cfg_emb_pipe_flt &&
1553 ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
1554 IPADBG("modem cfg emb pipe flt\n");
1555 else
1556 ipa_delete_dflt_flt_rules(clnt_hdl);
1557 }
1558
1559 if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
1560 atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt);
1561
1562 memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats));
1563
1564 if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
1565 ipa_cleanup_wlan_rx_common_cache();
1566
1567 ep->valid = 0;
1568 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
1569
1570 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
1571
1572 return 0;
1573}
1574
/**
 * ipa_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: pointer to the skb that was sent
 * @user2: IPA endpoint index
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa_connect.
 */
1585static void ipa_tx_comp_usr_notify_release(void *user1, int user2)
1586{
1587 struct sk_buff *skb = (struct sk_buff *)user1;
1588 int ep_idx = user2;
1589
1590 IPADBG("skb=%p ep=%d\n", skb, ep_idx);
1591
1592 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl);
1593
1594 if (ipa_ctx->ep[ep_idx].client_notify)
1595 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
1596 IPA_WRITE_DONE, (unsigned long)skb);
1597 else
1598 dev_kfree_skb_any(skb);
1599}
1600
1601static void ipa_tx_cmd_comp(void *user1, int user2)
1602{
1603 kfree(user1);
1604}
1605
/**
 * ipa2_tx_dp() - Data-path tx handler
 * @dst: [in] which IPA destination to route tx packets to
 * @skb: [in] the packet to send
 * @meta: [in] TX packet meta-data
 *
 * Data-path tx handler. This is used both for the SW data-path, which bypasses
 * most IPA HW blocks, AND for the regular HW data-path (WLAN AMPDU traffic
 * only). If dst is a "valid" CONS type, then the SW data-path is used. If dst
 * is the WLAN_AMPDU PROD type, then the HW data-path for WLAN AMPDU is used.
 * Anything else is an error. On error, the client needs to free the skb as
 * needed. On success, the IPA driver will later invoke the client callback if
 * one was supplied; that callback should free the skb. If no callback was
 * supplied, the IPA driver will free the skb internally.
 *
 * The function will use two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
 * the first descriptor will be used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once this send is done from the SPS point of view, the IPA driver will
 * get notified by the supplied callback - ipa_sps_irq_tx_comp()
 *
 * ipa_sps_irq_tx_comp will call the user supplied
 * callback (from ipa_connect)
 *
 * Returns: 0 on success, negative on failure
 */
1633int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
1634 struct ipa_tx_meta *meta)
1635{
1636 struct ipa_desc *desc;
1637 struct ipa_desc _desc[2];
1638 int dst_ep_idx;
1639 struct ipa_ip_packet_init *cmd;
1640 struct ipa_sys_context *sys;
1641 int src_ep_idx;
1642 int num_frags, f;
1643
1644 if (unlikely(!ipa_ctx)) {
1645 IPAERR("IPA driver was not initialized\n");
1646 return -EINVAL;
1647 }
1648
1649 if (skb->len == 0) {
1650 IPAERR("packet size is 0\n");
1651 return -EINVAL;
1652 }
1653
1654 num_frags = skb_shinfo(skb)->nr_frags;
1655 if (num_frags) {
1656 /* 1 desc is needed for the linear portion of skb;
1657 * 1 desc may be needed for the PACKET_INIT;
1658 * 1 desc for each frag
1659 */
1660 desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC);
1661 if (!desc) {
1662 IPAERR("failed to alloc desc array\n");
1663 goto fail_mem;
1664 }
1665 } else {
1666 memset(_desc, 0, 2 * sizeof(struct ipa_desc));
1667 desc = &_desc[0];
1668 }
1669
1670 /*
1671 * USB_CONS: PKT_INIT ep_idx = dst pipe
1672 * Q6_CONS: PKT_INIT ep_idx = sender pipe
1673 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
1674 *
1675 * LAN TX: all PKT_INIT
1676 * WAN TX: PKT_INIT (cmd) + HW (data)
1677 *
1678 */
1679 if (IPA_CLIENT_IS_CONS(dst)) {
1680 src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1681 if (-1 == src_ep_idx) {
1682 IPAERR("Client %u is not mapped\n",
1683 IPA_CLIENT_APPS_LAN_WAN_PROD);
1684 goto fail_gen;
1685 }
1686 dst_ep_idx = ipa2_get_ep_mapping(dst);
1687 } else {
1688 src_ep_idx = ipa2_get_ep_mapping(dst);
1689 if (-1 == src_ep_idx) {
1690 IPAERR("Client %u is not mapped\n", dst);
1691 goto fail_gen;
1692 }
1693 if (meta && meta->pkt_init_dst_ep_valid)
1694 dst_ep_idx = meta->pkt_init_dst_ep;
1695 else
1696 dst_ep_idx = -1;
1697 }
1698
1699 sys = ipa_ctx->ep[src_ep_idx].sys;
1700
1701 if (!sys->ep->valid) {
1702 IPAERR("pipe not valid\n");
1703 goto fail_gen;
1704 }
1705
1706 if (dst_ep_idx != -1) {
1707 /* SW data path */
1708 cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
1709 if (!cmd) {
1710 IPAERR("failed to alloc immediate command object\n");
1711 goto fail_gen;
1712 }
1713
1714 cmd->destination_pipe_index = dst_ep_idx;
1715 desc[0].opcode = IPA_IP_PACKET_INIT;
1716 desc[0].pyld = cmd;
1717 desc[0].len = sizeof(struct ipa_ip_packet_init);
1718 desc[0].type = IPA_IMM_CMD_DESC;
1719 desc[0].callback = ipa_tx_cmd_comp;
1720 desc[0].user1 = cmd;
1721 desc[1].pyld = skb->data;
1722 desc[1].len = skb_headlen(skb);
1723 desc[1].type = IPA_DATA_DESC_SKB;
1724 desc[1].callback = ipa_tx_comp_usr_notify_release;
1725 desc[1].user1 = skb;
1726 desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid &&
1727 meta->pkt_init_dst_ep_remote) ?
1728 src_ep_idx :
1729 dst_ep_idx;
1730 if (meta && meta->dma_address_valid) {
1731 desc[1].dma_address_valid = true;
1732 desc[1].dma_address = meta->dma_address;
1733 }
1734
1735 for (f = 0; f < num_frags; f++) {
1736 desc[2+f].frag = &skb_shinfo(skb)->frags[f];
1737 desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
1738 desc[2+f].len = skb_frag_size(desc[2+f].frag);
1739 }
1740
1741 /* don't free skb till frag mappings are released */
1742 if (num_frags) {
1743 desc[2+f-1].callback = desc[1].callback;
1744 desc[2+f-1].user1 = desc[1].user1;
1745 desc[2+f-1].user2 = desc[1].user2;
1746 desc[1].callback = NULL;
1747 }
1748
1749 if (ipa_send(sys, num_frags + 2, desc, true)) {
1750 IPAERR("fail to send skb %p num_frags %u SWP\n",
1751 skb, num_frags);
1752 goto fail_send;
1753 }
1754 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
1755 } else {
1756 /* HW data path */
1757 desc[0].pyld = skb->data;
1758 desc[0].len = skb_headlen(skb);
1759 desc[0].type = IPA_DATA_DESC_SKB;
1760 desc[0].callback = ipa_tx_comp_usr_notify_release;
1761 desc[0].user1 = skb;
1762 desc[0].user2 = src_ep_idx;
1763
1764 if (meta && meta->dma_address_valid) {
1765 desc[0].dma_address_valid = true;
1766 desc[0].dma_address = meta->dma_address;
1767 }
1768
1769 if (num_frags == 0) {
1770 if (ipa_send_one(sys, desc, true)) {
1771 IPAERR("fail to send skb %p HWP\n", skb);
1772 goto fail_gen;
1773 }
1774 } else {
1775 for (f = 0; f < num_frags; f++) {
1776 desc[1+f].frag = &skb_shinfo(skb)->frags[f];
1777 desc[1+f].type = IPA_DATA_DESC_SKB_PAGED;
1778 desc[1+f].len = skb_frag_size(desc[1+f].frag);
1779 }
1780
1781 /* don't free skb till frag mappings are released */
1782 desc[1+f-1].callback = desc[0].callback;
1783 desc[1+f-1].user1 = desc[0].user1;
1784 desc[1+f-1].user2 = desc[0].user2;
1785 desc[0].callback = NULL;
1786
1787 if (ipa_send(sys, num_frags + 1, desc, true)) {
1788 IPAERR("fail to send skb %p num_frags %u HWP\n",
1789 skb, num_frags);
1790 goto fail_gen;
1791 }
1792 }
1793
1794 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
1795 }
1796
1797 if (num_frags) {
1798 kfree(desc);
1799 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
1800 }
1801
1802 return 0;
1803
1804fail_send:
1805 kfree(cmd);
1806fail_gen:
1807 if (num_frags)
1808 kfree(desc);
1809fail_mem:
1810 return -EFAULT;
1811}
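/*
 * Illustrative usage sketch for ipa2_tx_dp() on the SW data path (a sketch,
 * not compiled code). On success the skb is owned by the IPA driver and is
 * released from the client notify callback (IPA_WRITE_DONE) or, when no
 * callback was registered, by the IPA driver itself:
 *
 *	if (ipa2_tx_dp(IPA_CLIENT_APPS_LAN_CONS, skb, NULL)) {
 *		dev_kfree_skb_any(skb);		(error: skb is still ours)
 *		return NETDEV_TX_OK;
 *	}
 *	return NETDEV_TX_OK;			(skb now owned by IPA)
 */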
1812
1813static void ipa_wq_handle_rx(struct work_struct *work)
1814{
1815 struct ipa_sys_context *sys;
1816
1817 sys = container_of(work, struct ipa_sys_context, work);
1818
1819 if (sys->ep->napi_enabled) {
1820 IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
1821 sys->ep->client_notify(sys->ep->priv,
1822 IPA_CLIENT_START_POLL, 0);
1823 } else
1824 ipa_handle_rx(sys);
1825}
1826
1827static void ipa_wq_repl_rx(struct work_struct *work)
1828{
1829 struct ipa_sys_context *sys;
1830 void *ptr;
1831 struct ipa_rx_pkt_wrapper *rx_pkt;
1832 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
1833 u32 next;
1834 u32 curr;
1835
1836 sys = container_of(work, struct ipa_sys_context, repl_work);
1837 curr = atomic_read(&sys->repl.tail_idx);
1838
1839begin:
1840 while (1) {
1841 next = (curr + 1) % sys->repl.capacity;
1842 if (next == atomic_read(&sys->repl.head_idx))
1843 goto fail_kmem_cache_alloc;
1844
1845 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
1846 flag);
1847 if (!rx_pkt) {
1848 pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
1849 __func__, sys);
1850 goto fail_kmem_cache_alloc;
1851 }
1852
1853 INIT_LIST_HEAD(&rx_pkt->link);
1854 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
1855 rx_pkt->sys = sys;
1856
1857 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
1858 if (rx_pkt->data.skb == NULL) {
1859 pr_err_ratelimited("%s fail alloc skb sys=%p\n",
1860 __func__, sys);
1861 goto fail_skb_alloc;
1862 }
1863 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
1864 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
1865 sys->rx_buff_sz,
1866 DMA_FROM_DEVICE);
1867 if (rx_pkt->data.dma_addr == 0 ||
1868 rx_pkt->data.dma_addr == ~0) {
1869 pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
1870 __func__, (void *)rx_pkt->data.dma_addr,
1871 ptr, sys);
1872 goto fail_dma_mapping;
1873 }
1874
1875 sys->repl.cache[curr] = rx_pkt;
1876 curr = next;
1877 /* ensure write is done before setting tail index */
1878 mb();
1879 atomic_set(&sys->repl.tail_idx, next);
1880 }
1881
1882 return;
1883
1884fail_dma_mapping:
1885 sys->free_skb(rx_pkt->data.skb);
1886fail_skb_alloc:
1887 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1888fail_kmem_cache_alloc:
1889 if (atomic_read(&sys->repl.tail_idx) ==
1890 atomic_read(&sys->repl.head_idx)) {
1891 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
1892 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty);
1893 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
1894 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty);
1895 else
1896 WARN_ON(1);
1897 pr_err_ratelimited("%s sys=%p repl ring empty\n",
1898 __func__, sys);
1899 goto begin;
1900 }
1901}
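/*
 * Note on the replenish ring used above: repl.capacity is allocated as
 * rx_pool_sz + 1 (see ipa2_setup_sys_pipe()), so one slot is always kept
 * empty. "next == head_idx" then unambiguously means the ring is full,
 * while "tail_idx == head_idx" means it is empty, without the need for a
 * separate element counter.
 */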
1902
1903static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys)
1904{
1905 struct ipa_rx_pkt_wrapper *rx_pkt = NULL;
1906 struct ipa_rx_pkt_wrapper *tmp;
1907 int ret;
1908 u32 rx_len_cached = 0;
1909
1910 IPADBG("\n");
1911
1912 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1913 rx_len_cached = sys->len;
1914
1915 if (rx_len_cached < sys->rx_pool_sz) {
1916 list_for_each_entry_safe(rx_pkt, tmp,
1917 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1918 list_del(&rx_pkt->link);
1919
1920 if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0)
1921 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1922
1923 INIT_LIST_HEAD(&rx_pkt->link);
1924 rx_pkt->len = 0;
1925 rx_pkt->sys = sys;
1926
1927 ret = sps_transfer_one(sys->ep->ep_hdl,
1928 rx_pkt->data.dma_addr,
1929 IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
1930
1931 if (ret) {
1932 IPAERR("sps_transfer_one failed %d\n", ret);
1933 goto fail_sps_transfer;
1934 }
1935
1936 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
1937 rx_len_cached = ++sys->len;
1938
1939 if (rx_len_cached >= sys->rx_pool_sz) {
1940 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1941 return;
1942 }
1943 }
1944 }
1945 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1946
1947 if (rx_len_cached < sys->rx_pool_sz &&
1948 ipa_ctx->wc_memb.wlan_comm_total_cnt <
1949 IPA_WLAN_COMM_RX_POOL_HIGH) {
1950 ipa_replenish_rx_cache(sys);
1951 ipa_ctx->wc_memb.wlan_comm_total_cnt +=
1952 (sys->rx_pool_sz - rx_len_cached);
1953 }
1954
1955 return;
1956
1957fail_sps_transfer:
1958 list_del(&rx_pkt->link);
1959 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1960}
1961
1962static void ipa_cleanup_wlan_rx_common_cache(void)
1963{
1964 struct ipa_rx_pkt_wrapper *rx_pkt;
1965 struct ipa_rx_pkt_wrapper *tmp;
1966
1967 list_for_each_entry_safe(rx_pkt, tmp,
1968 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1969 list_del(&rx_pkt->link);
1970 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
1972 dev_kfree_skb_any(rx_pkt->data.skb);
1973 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1974 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1975 ipa_ctx->wc_memb.wlan_comm_total_cnt--;
1976 }
1977 ipa_ctx->wc_memb.total_tx_pkts_freed = 0;
1978
1979 if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0)
1980 IPAERR("wlan comm buff free cnt: %d\n",
1981 ipa_ctx->wc_memb.wlan_comm_free_cnt);
1982
1983 if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0)
1984 IPAERR("wlan comm buff total cnt: %d\n",
1985 ipa_ctx->wc_memb.wlan_comm_total_cnt);
1986
1987}
1988
1989static void ipa_alloc_wlan_rx_common_cache(u32 size)
1990{
1991 void *ptr;
1992 struct ipa_rx_pkt_wrapper *rx_pkt;
1993 int rx_len_cached = 0;
1994 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
1995 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
1996
1997 rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt;
1998 while (rx_len_cached < size) {
1999 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2000 flag);
2001 if (!rx_pkt) {
2002 IPAERR("failed to alloc rx wrapper\n");
2003 goto fail_kmem_cache_alloc;
2004 }
2005
2006 INIT_LIST_HEAD(&rx_pkt->link);
2007 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2008
2009 rx_pkt->data.skb =
2010 ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
2011 flag);
2012 if (rx_pkt->data.skb == NULL) {
2013 IPAERR("failed to alloc skb\n");
2014 goto fail_skb_alloc;
2015 }
2016 ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
2017 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2018 IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
2019 if (rx_pkt->data.dma_addr == 0 ||
2020 rx_pkt->data.dma_addr == ~0) {
2021 IPAERR("dma_map_single failure %p for %p\n",
2022 (void *)rx_pkt->data.dma_addr, ptr);
2023 goto fail_dma_mapping;
2024 }
2025
2026 list_add_tail(&rx_pkt->link,
2027 &ipa_ctx->wc_memb.wlan_comm_desc_list);
2028 rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
2029
2030 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
2031
2032 }
2033
2034 return;
2035
2036fail_dma_mapping:
2037 dev_kfree_skb_any(rx_pkt->data.skb);
2038fail_skb_alloc:
2039 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2040fail_kmem_cache_alloc:
2041 return;
2042}
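
/*
 * Bookkeeping sketch (illustrative): the WLAN common pool keeps two
 * counters under wc_memb - wlan_comm_total_cnt tracks all buffers
 * accounted to the pool (free or handed to a pipe), while
 * wlan_comm_free_cnt tracks only those currently sitting on
 * wlan_comm_desc_list. After ipa_alloc_wlan_rx_common_cache() fills an
 * empty pool up to IPA_WLAN_COMM_RX_POOL_LOW, both counters are equal;
 * handing a buffer to a WLAN pipe decrements only the free count.
 */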
2043
2044
2045/**
2046 * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
2047 *
 * The function allocates wrappers from the rx_pkt_wrapper_cache slab cache
 * until the pipe holds sys->rx_pool_sz buffers. For each buffer:
 * - Allocate a wrapper from the cache
 * - Initialize the packet's link
 * - Initialize the packet's work struct
 * - Allocate the packet's socket buffer (skb)
 * - Reserve the full buffer length in the skb
 * - DMA-map the buffer
 * - Add the packet to the system pipe linked list
 * - Initiate an SPS transfer so the SPS driver can hand the buffer to HW.
2058 */
2059static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
2060{
2061 void *ptr;
2062 struct ipa_rx_pkt_wrapper *rx_pkt;
2063 int ret;
2064 int rx_len_cached = 0;
2065 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2066 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2067
2068 rx_len_cached = sys->len;
2069
2070 while (rx_len_cached < sys->rx_pool_sz) {
2071 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2072 flag);
2073 if (!rx_pkt) {
2074 IPAERR("failed to alloc rx wrapper\n");
2075 goto fail_kmem_cache_alloc;
2076 }
2077
2078 INIT_LIST_HEAD(&rx_pkt->link);
2079 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2080 rx_pkt->sys = sys;
2081
2082 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
2083 if (rx_pkt->data.skb == NULL) {
2084 IPAERR("failed to alloc skb\n");
2085 goto fail_skb_alloc;
2086 }
2087 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2088 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2089 sys->rx_buff_sz,
2090 DMA_FROM_DEVICE);
2091 if (rx_pkt->data.dma_addr == 0 ||
2092 rx_pkt->data.dma_addr == ~0) {
2093 IPAERR("dma_map_single failure %p for %p\n",
2094 (void *)rx_pkt->data.dma_addr, ptr);
2095 goto fail_dma_mapping;
2096 }
2097
2098 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2099 rx_len_cached = ++sys->len;
2100
2101 ret = sps_transfer_one(sys->ep->ep_hdl,
2102 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2103
2104 if (ret) {
2105 IPAERR("sps_transfer_one failed %d\n", ret);
2106 goto fail_sps_transfer;
2107 }
2108 }
2109
2110 return;
2111
2112fail_sps_transfer:
2113 list_del(&rx_pkt->link);
2114 rx_len_cached = --sys->len;
2115 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2116 sys->rx_buff_sz, DMA_FROM_DEVICE);
2117fail_dma_mapping:
2118 sys->free_skb(rx_pkt->data.skb);
2119fail_skb_alloc:
2120 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2121fail_kmem_cache_alloc:
2122 if (rx_len_cached == 0)
2123 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2124 msecs_to_jiffies(1));
2125}
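
/*
 * Error-unwind sketch (illustrative only): the fail_* labels above undo
 * the per-buffer setup steps in reverse order. For a failure in
 * sps_transfer_one() the unwind is:
 *
 *	list_del(&rx_pkt->link);         <- undo list_add_tail()
 *	dma_unmap_single(...);           <- undo dma_map_single()
 *	sys->free_skb(rx_pkt->data.skb); <- undo sys->get_skb()
 *	kmem_cache_free(...);            <- undo kmem_cache_zalloc()
 *
 * and a delayed retry is queued if the pipe ended up with no buffers
 * at all.
 */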
2126
2127static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
2128{
2129 void *ptr;
2130 struct ipa_rx_pkt_wrapper *rx_pkt;
2131 int ret;
2132 int rx_len_cached = 0;
2133
2134 rx_len_cached = sys->len;
2135
2136 while (rx_len_cached < sys->rx_pool_sz) {
2137 spin_lock_bh(&sys->spinlock);
2138 if (list_empty(&sys->rcycl_list))
2139 goto fail_kmem_cache_alloc;
2140
2141 rx_pkt = list_first_entry(&sys->rcycl_list,
2142 struct ipa_rx_pkt_wrapper, link);
2143 list_del(&rx_pkt->link);
2144 spin_unlock_bh(&sys->spinlock);
2145 INIT_LIST_HEAD(&rx_pkt->link);
2146 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2147 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
2148 ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
2149 if (rx_pkt->data.dma_addr == 0 ||
2150 rx_pkt->data.dma_addr == ~0)
2151 goto fail_dma_mapping;
2152
2153 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2154 rx_len_cached = ++sys->len;
2155
2156 ret = sps_transfer_one(sys->ep->ep_hdl,
2157 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2158
2159 if (ret) {
2160 IPAERR("sps_transfer_one failed %d\n", ret);
2161 goto fail_sps_transfer;
2162 }
2163 }
2164
2165 return;
2166fail_sps_transfer:
2167 rx_len_cached = --sys->len;
2168 list_del(&rx_pkt->link);
2169 INIT_LIST_HEAD(&rx_pkt->link);
2170 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2171 sys->rx_buff_sz, DMA_FROM_DEVICE);
2172fail_dma_mapping:
	spin_lock_bh(&sys->spinlock);
	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
	/* the spinlock is released at fail_kmem_cache_alloc below */
2177fail_kmem_cache_alloc:
2178 spin_unlock_bh(&sys->spinlock);
2179 if (rx_len_cached == 0)
2180 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2181 msecs_to_jiffies(1));
2182}
2183
2184static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
2185{
2186 struct ipa_rx_pkt_wrapper *rx_pkt;
2187 int ret;
2188 int rx_len_cached = 0;
2189 u32 curr;
2190
2191 rx_len_cached = sys->len;
2192 curr = atomic_read(&sys->repl.head_idx);
2193
2194 while (rx_len_cached < sys->rx_pool_sz) {
2195 if (curr == atomic_read(&sys->repl.tail_idx)) {
2196 queue_work(sys->repl_wq, &sys->repl_work);
2197 break;
2198 }
2199
2200 rx_pkt = sys->repl.cache[curr];
2201 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2202
2203 ret = sps_transfer_one(sys->ep->ep_hdl,
2204 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2205
2206 if (ret) {
2207 IPAERR("sps_transfer_one failed %d\n", ret);
2208 list_del(&rx_pkt->link);
2209 break;
2210 }
2211 rx_len_cached = ++sys->len;
2212 sys->repl_trig_cnt++;
2213 curr = (curr + 1) % sys->repl.capacity;
2214 /* ensure write is done before setting head index */
2215 mb();
2216 atomic_set(&sys->repl.head_idx, curr);
2217 }
2218
2219 if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0)
2220 queue_work(sys->repl_wq, &sys->repl_work);
2221
2222 if (rx_len_cached <= sys->ep->rx_replenish_threshold) {
2223 if (rx_len_cached == 0) {
2224 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
2225 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty);
2226 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
2227 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty);
2228 else
2229 WARN_ON(1);
2230 }
2231 sys->repl_trig_cnt = 0;
2232 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2233 msecs_to_jiffies(1));
2234 }
2235}
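
/*
 * Fast-replenish trigger sketch (illustrative): the consumer side above
 * kicks the ipa_wq_repl_rx() producer when the ring runs dry mid-loop
 * and after every repl_trig_thresh successful refills; in addition,
 * the delayed replenish work re-runs this handler whenever the pipe
 * falls to its rx_replenish_threshold. Assuming a pool of 320 buffers
 * and repl_trig_thresh = rx_pool_sz / 8, the producer is scheduled
 * roughly every 40 refilled buffers.
 */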
2236
2237static void replenish_rx_work_func(struct work_struct *work)
2238{
2239 struct delayed_work *dwork;
2240 struct ipa_sys_context *sys;
2241
2242 dwork = container_of(work, struct delayed_work, work);
2243 sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
2244 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2245 sys->repl_hdlr(sys);
2246 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2247}
2248
2249/**
2250 * ipa_cleanup_rx() - release RX queue resources
 * @sys: system pipe context whose RX resources are released
2252 */
2253static void ipa_cleanup_rx(struct ipa_sys_context *sys)
2254{
2255 struct ipa_rx_pkt_wrapper *rx_pkt;
2256 struct ipa_rx_pkt_wrapper *r;
2257 u32 head;
2258 u32 tail;
2259
2260 list_for_each_entry_safe(rx_pkt, r,
2261 &sys->head_desc_list, link) {
2262 list_del(&rx_pkt->link);
2263 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2264 sys->rx_buff_sz, DMA_FROM_DEVICE);
2265 sys->free_skb(rx_pkt->data.skb);
2266 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2267 }
2268
2269 list_for_each_entry_safe(rx_pkt, r,
2270 &sys->rcycl_list, link) {
2271 list_del(&rx_pkt->link);
2272 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2273 sys->rx_buff_sz, DMA_FROM_DEVICE);
2274 sys->free_skb(rx_pkt->data.skb);
2275 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2276 }
2277
2278 if (sys->repl.cache) {
2279 head = atomic_read(&sys->repl.head_idx);
2280 tail = atomic_read(&sys->repl.tail_idx);
2281 while (head != tail) {
2282 rx_pkt = sys->repl.cache[head];
2283 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2284 sys->rx_buff_sz, DMA_FROM_DEVICE);
2285 sys->free_skb(rx_pkt->data.skb);
2286 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2287 head = (head + 1) % sys->repl.capacity;
2288 }
2289 kfree(sys->repl.cache);
2290 }
2291}
2292
2293static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
2294{
2295 struct sk_buff *skb2 = NULL;
2296
2297 skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
2298 if (likely(skb2)) {
2299 /* Set the data pointer */
2300 skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
2301 memcpy(skb2->data, skb->data, len);
2302 skb2->len = len;
2303 skb_set_tail_pointer(skb2, len);
2304 }
2305
2306 return skb2;
2307}
2308
2309static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
2310 struct ipa_sys_context *sys)
2311{
2312 int rc = 0;
2313 struct ipa_hw_pkt_status *status;
2314 struct sk_buff *skb2;
2315 int pad_len_byte;
2316 int len;
2317 unsigned char *buf;
2318 int src_pipe;
2319 unsigned int used = *(unsigned int *)skb->cb;
2320 unsigned int used_align = ALIGN(used, 32);
2321 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2322
2323 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2324
2325 if (skb->len == 0) {
2326 IPAERR("ZLT\n");
2327 sys->free_skb(skb);
2328 return rc;
2329 }
2330
2331 if (sys->len_partial) {
2332 IPADBG("len_partial %d\n", sys->len_partial);
2333 buf = skb_push(skb, sys->len_partial);
2334 memcpy(buf, sys->prev_skb->data, sys->len_partial);
2335 sys->len_partial = 0;
2336 sys->free_skb(sys->prev_skb);
2337 sys->prev_skb = NULL;
2338 goto begin;
2339 }
2340
2341 /* this pipe has TX comp (status only) + mux-ed LAN RX data
2342 * (status+data)
2343 */
2344 if (sys->len_rem) {
2345 IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
2346 sys->len_pad);
2347 if (sys->len_rem <= skb->len) {
2348 if (sys->prev_skb) {
2349 skb2 = skb_copy_expand(sys->prev_skb, 0,
2350 sys->len_rem, GFP_KERNEL);
2351 if (likely(skb2)) {
2352 memcpy(skb_put(skb2, sys->len_rem),
2353 skb->data, sys->len_rem);
2354 skb_trim(skb2,
2355 skb2->len - sys->len_pad);
2356 skb2->truesize = skb2->len +
2357 sizeof(struct sk_buff);
2358 if (sys->drop_packet)
2359 dev_kfree_skb_any(skb2);
2360 else
2361 sys->ep->client_notify(
2362 sys->ep->priv,
2363 IPA_RECEIVE,
2364 (unsigned long)(skb2));
2365 } else {
2366 IPAERR("copy expand failed\n");
2367 }
2368 dev_kfree_skb_any(sys->prev_skb);
2369 }
2370 skb_pull(skb, sys->len_rem);
2371 sys->prev_skb = NULL;
2372 sys->len_rem = 0;
2373 sys->len_pad = 0;
2374 } else {
2375 if (sys->prev_skb) {
2376 skb2 = skb_copy_expand(sys->prev_skb, 0,
2377 skb->len, GFP_KERNEL);
2378 if (likely(skb2)) {
2379 memcpy(skb_put(skb2, skb->len),
2380 skb->data, skb->len);
2381 } else {
2382 IPAERR("copy expand failed\n");
2383 }
2384 dev_kfree_skb_any(sys->prev_skb);
2385 sys->prev_skb = skb2;
2386 }
2387 sys->len_rem -= skb->len;
2388 sys->free_skb(skb);
2389 return rc;
2390 }
2391 }
2392
2393begin:
2394 while (skb->len) {
2395 sys->drop_packet = false;
2396 IPADBG("LEN_REM %d\n", skb->len);
2397
2398 if (skb->len < IPA_PKT_STATUS_SIZE) {
2399 WARN_ON(sys->prev_skb != NULL);
2400 IPADBG("status straddles buffer\n");
2401 sys->prev_skb = skb;
2402 sys->len_partial = skb->len;
2403 return rc;
2404 }
2405
2406 status = (struct ipa_hw_pkt_status *)skb->data;
2407 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2408 status->status_opcode, status->endp_src_idx,
2409 status->endp_dest_idx, status->pkt_len);
2410 if (sys->status_stat) {
2411 sys->status_stat->status[sys->status_stat->curr] =
2412 *status;
2413 sys->status_stat->curr++;
2414 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2415 sys->status_stat->curr = 0;
2416 }
2417
2418 if (status->status_opcode !=
2419 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2420 status->status_opcode !=
2421 IPA_HW_STATUS_OPCODE_PACKET &&
2422 status->status_opcode !=
2423 IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET &&
2424 status->status_opcode !=
2425 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2426 IPAERR("unsupported opcode(%d)\n",
2427 status->status_opcode);
2428 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2429 continue;
2430 }
2431 IPA_STATS_EXCP_CNT(status->exception,
2432 ipa_ctx->stats.rx_excp_pkts);
2433 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2434 status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
2435 IPAERR("status fields invalid\n");
2436 IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
2437 status->status_opcode, status->endp_src_idx,
2438 status->endp_dest_idx, status->pkt_len);
2439 WARN_ON(1);
2440 BUG();
2441 }
2442 if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
2443 struct ipa_tag_completion *comp;
2444
2445 IPADBG("TAG packet arrived\n");
2446 if (status->tag_f_2 == IPA_COOKIE) {
2447 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2448 if (skb->len < sizeof(comp)) {
2449 IPAERR("TAG arrived without packet\n");
2450 return rc;
2451 }
2452 memcpy(&comp, skb->data, sizeof(comp));
2453 skb_pull(skb, sizeof(comp) +
2454 IPA_SIZE_DL_CSUM_META_TRAILER);
2455 complete(&comp->comp);
2456 if (atomic_dec_return(&comp->cnt) == 0)
2457 kfree(comp);
2458 continue;
2459 } else {
2460 IPADBG("ignoring TAG with wrong cookie\n");
2461 }
2462 }
2463 if (status->pkt_len == 0) {
2464 IPADBG("Skip aggr close status\n");
2465 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2466 IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close);
2467 IPA_STATS_DEC_CNT(
2468 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2469 continue;
2470 }
2471 if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
2472 /* RX data */
2473 src_pipe = status->endp_src_idx;
2474
2475 /*
2476 * A packet which is received back to the AP after
2477 * there was no route match.
2478 */
2479 if (!status->exception && !status->route_match)
2480 sys->drop_packet = true;
2481
2482 if (skb->len == IPA_PKT_STATUS_SIZE &&
2483 !status->exception) {
2484 WARN_ON(sys->prev_skb != NULL);
2485 IPADBG("Ins header in next buffer\n");
2486 sys->prev_skb = skb;
2487 sys->len_partial = skb->len;
2488 return rc;
2489 }
2490
2491 pad_len_byte = ((status->pkt_len + 3) & ~3) -
2492 status->pkt_len;
2493
2494 len = status->pkt_len + pad_len_byte +
2495 IPA_SIZE_DL_CSUM_META_TRAILER;
2496 IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
2497 status->pkt_len, len);
2498
2499 if (status->exception ==
2500 IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
2501 IPADBG("Dropping packet on DeAggr Exception\n");
2502 sys->drop_packet = true;
2503 }
2504
2505 skb2 = ipa_skb_copy_for_client(skb,
2506 status->pkt_len + IPA_PKT_STATUS_SIZE);
2507 if (likely(skb2)) {
2508 if (skb->len < len + IPA_PKT_STATUS_SIZE) {
2509 IPADBG("SPL skb len %d len %d\n",
2510 skb->len, len);
2511 sys->prev_skb = skb2;
2512 sys->len_rem = len - skb->len +
2513 IPA_PKT_STATUS_SIZE;
2514 sys->len_pad = pad_len_byte;
2515 skb_pull(skb, skb->len);
2516 } else {
2517 skb_trim(skb2, status->pkt_len +
2518 IPA_PKT_STATUS_SIZE);
2519 IPADBG("rx avail for %d\n",
2520 status->endp_dest_idx);
2521 if (sys->drop_packet) {
2522 dev_kfree_skb_any(skb2);
2523 } else if (status->pkt_len >
2524 IPA_GENERIC_AGGR_BYTE_LIMIT *
2525 1024) {
2526 IPAERR("packet size invalid\n");
2527 IPAERR("STATUS opcode=%d\n",
2528 status->status_opcode);
2529 IPAERR("src=%d dst=%d len=%d\n",
2530 status->endp_src_idx,
2531 status->endp_dest_idx,
2532 status->pkt_len);
2533 BUG();
2534 } else {
2535 skb2->truesize = skb2->len +
2536 sizeof(struct sk_buff) +
2537 (ALIGN(len +
2538 IPA_PKT_STATUS_SIZE, 32) *
2539 unused / used_align);
2540 sys->ep->client_notify(
2541 sys->ep->priv,
2542 IPA_RECEIVE,
2543 (unsigned long)(skb2));
2544 }
2545 skb_pull(skb, len +
2546 IPA_PKT_STATUS_SIZE);
2547 }
2548 } else {
2549 IPAERR("fail to alloc skb\n");
2550 if (skb->len < len) {
2551 sys->prev_skb = NULL;
2552 sys->len_rem = len - skb->len +
2553 IPA_PKT_STATUS_SIZE;
2554 sys->len_pad = pad_len_byte;
2555 skb_pull(skb, skb->len);
2556 } else {
2557 skb_pull(skb, len +
2558 IPA_PKT_STATUS_SIZE);
2559 }
2560 }
2561 /* TX comp */
2562 ipa_wq_write_done_status(src_pipe);
2563 IPADBG("tx comp imp for %d\n", src_pipe);
2564 } else {
2565 /* TX comp */
2566 ipa_wq_write_done_status(status->endp_src_idx);
2567 IPADBG("tx comp exp for %d\n", status->endp_src_idx);
2568 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2569 IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl);
2570 IPA_STATS_DEC_CNT(
2571 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2572 }
	}
2574
2575 sys->free_skb(skb);
2576 return rc;
2577}
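
/*
 * Padding arithmetic sketch (illustrative, hypothetical numbers): each
 * LAN RX payload is padded to a 4-byte boundary and followed by the
 * DL checksum/metadata trailer, so for a hypothetical pkt_len of 61:
 *
 *	pad_len_byte = ((61 + 3) & ~3) - 61 = 3
 *	len          = 61 + 3 + IPA_SIZE_DL_CSUM_META_TRAILER
 *
 * and len + IPA_PKT_STATUS_SIZE bytes are consumed from the aggregated
 * buffer before the next status word is parsed.
 */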
2578
2579static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb,
2580 struct sk_buff *skb, unsigned int len)
2581{
2582 struct sk_buff *skb2;
2583
2584 skb2 = skb_copy_expand(prev_skb, 0,
2585 len, GFP_KERNEL);
2586 if (likely(skb2)) {
2587 memcpy(skb_put(skb2, len),
2588 skb->data, len);
2589 } else {
2590 IPAERR("copy expand failed\n");
2591 skb2 = NULL;
2592 }
2593 dev_kfree_skb_any(prev_skb);
2594
2595 return skb2;
2596}
2597
2598static void wan_rx_handle_splt_pyld(struct sk_buff *skb,
2599 struct ipa_sys_context *sys)
2600{
2601 struct sk_buff *skb2;
2602
2603 IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
2604 if (sys->len_rem <= skb->len) {
2605 if (sys->prev_skb) {
2606 skb2 = join_prev_skb(sys->prev_skb, skb,
2607 sys->len_rem);
2608 if (likely(skb2)) {
2609 IPADBG(
2610 "removing Status element from skb and sending to WAN client");
2611 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2612 skb2->truesize = skb2->len +
2613 sizeof(struct sk_buff);
2614 sys->ep->client_notify(sys->ep->priv,
2615 IPA_RECEIVE,
2616 (unsigned long)(skb2));
2617 }
2618 }
2619 skb_pull(skb, sys->len_rem);
2620 sys->prev_skb = NULL;
2621 sys->len_rem = 0;
2622 } else {
2623 if (sys->prev_skb) {
2624 skb2 = join_prev_skb(sys->prev_skb, skb,
2625 skb->len);
2626 sys->prev_skb = skb2;
2627 }
2628 sys->len_rem -= skb->len;
2629 skb_pull(skb, skb->len);
2630 }
2631}
2632
2633static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
2634 struct ipa_sys_context *sys)
2635{
2636 int rc = 0;
2637 struct ipa_hw_pkt_status *status;
2638 struct sk_buff *skb2;
2639 u16 pkt_len_with_pad;
2640 u32 qmap_hdr;
2641 int checksum_trailer_exists;
2642 int frame_len;
2643 int ep_idx;
2644 unsigned int used = *(unsigned int *)skb->cb;
2645 unsigned int used_align = ALIGN(used, 32);
2646 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2647
2648 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2649 if (skb->len == 0) {
2650 IPAERR("ZLT\n");
2651 goto bail;
2652 }
2653
2654 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
2655 sys->ep->client_notify(sys->ep->priv,
2656 IPA_RECEIVE, (unsigned long)(skb));
2657 return rc;
2658 }
2659 if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
		IPAERR("Recycle should be enabled only with GRO aggregation\n");
2661 ipa_assert();
2662 }
2663 /*
2664 * payload splits across 2 buff or more,
2665 * take the start of the payload from prev_skb
2666 */
2667 if (sys->len_rem)
2668 wan_rx_handle_splt_pyld(skb, sys);
2669
2670
2671 while (skb->len) {
2672 IPADBG("LEN_REM %d\n", skb->len);
2673 if (skb->len < IPA_PKT_STATUS_SIZE) {
2674 IPAERR("status straddles buffer\n");
2675 WARN_ON(1);
2676 goto bail;
2677 }
2678 status = (struct ipa_hw_pkt_status *)skb->data;
2679 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2680 status->status_opcode, status->endp_src_idx,
2681 status->endp_dest_idx, status->pkt_len);
2682
2683 if (sys->status_stat) {
2684 sys->status_stat->status[sys->status_stat->curr] =
2685 *status;
2686 sys->status_stat->curr++;
2687 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2688 sys->status_stat->curr = 0;
2689 }
2690
2691 if (status->status_opcode !=
2692 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2693 status->status_opcode !=
2694 IPA_HW_STATUS_OPCODE_PACKET &&
2695 status->status_opcode !=
2696 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2697 IPAERR("unsupported opcode\n");
2698 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2699 continue;
2700 }
2701 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2702 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2703 status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
2704 status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
2705 IPAERR("status fields invalid\n");
2706 WARN_ON(1);
2707 goto bail;
2708 }
2709 if (status->pkt_len == 0) {
2710 IPADBG("Skip aggr close status\n");
2711 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2712 IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts);
2713 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close);
2714 continue;
2715 }
2716 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
2717 if (status->endp_dest_idx != ep_idx) {
2718 IPAERR("expected endp_dest_idx %d received %d\n",
2719 ep_idx, status->endp_dest_idx);
2720 WARN_ON(1);
2721 goto bail;
2722 }
2723 /* RX data */
2724 if (skb->len == IPA_PKT_STATUS_SIZE) {
2725 IPAERR("Ins header in next buffer\n");
2726 WARN_ON(1);
2727 goto bail;
2728 }
2729 qmap_hdr = *(u32 *)(status+1);
2730 /*
2731 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
2732 * header
2733 */
2734
2735 /*QMAP is BE: convert the pkt_len field from BE to LE*/
2736 pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
2737 IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
2738 /*get the CHECKSUM_PROCESS bit*/
2739 checksum_trailer_exists = status->status_mask &
2740 IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
2741 IPADBG("checksum_trailer_exists %d\n",
2742 checksum_trailer_exists);
2743
2744 frame_len = IPA_PKT_STATUS_SIZE +
2745 IPA_QMAP_HEADER_LENGTH +
2746 pkt_len_with_pad;
2747 if (checksum_trailer_exists)
2748 frame_len += IPA_DL_CHECKSUM_LENGTH;
2749 IPADBG("frame_len %d\n", frame_len);
2750
2751 skb2 = skb_clone(skb, GFP_KERNEL);
2752 if (likely(skb2)) {
2753 /*
2754 * the len of actual data is smaller than expected
2755 * payload split across 2 buff
2756 */
2757 if (skb->len < frame_len) {
2758 IPADBG("SPL skb len %d len %d\n",
2759 skb->len, frame_len);
2760 sys->prev_skb = skb2;
2761 sys->len_rem = frame_len - skb->len;
2762 skb_pull(skb, skb->len);
2763 } else {
2764 skb_trim(skb2, frame_len);
2765 IPADBG("rx avail for %d\n",
2766 status->endp_dest_idx);
2767 IPADBG(
2768 "removing Status element from skb and sending to WAN client");
2769 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2770 skb2->truesize = skb2->len +
2771 sizeof(struct sk_buff) +
2772 (ALIGN(frame_len, 32) *
2773 unused / used_align);
2774 sys->ep->client_notify(sys->ep->priv,
2775 IPA_RECEIVE, (unsigned long)(skb2));
2776 skb_pull(skb, frame_len);
2777 }
2778 } else {
2779 IPAERR("fail to clone\n");
2780 if (skb->len < frame_len) {
2781 sys->prev_skb = NULL;
2782 sys->len_rem = frame_len - skb->len;
2783 skb_pull(skb, skb->len);
2784 } else {
2785 skb_pull(skb, frame_len);
2786 }
2787 }
	}
2789bail:
2790 sys->free_skb(skb);
2791 return rc;
2792}
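
/*
 * WAN frame layout sketch (illustrative): each frame inside an
 * aggregated WAN buffer is laid out as
 *
 *	| status (IPA_PKT_STATUS_SIZE) | QMAP hdr (IPA_QMAP_HEADER_LENGTH) |
 *	| payload + pad (pkt_len_with_pad) | optional DL checksum trailer  |
 *
 * pkt_len_with_pad is carried big-endian in the last two bytes of the
 * QMAP header; on the little-endian CPUs this driver targets those
 * bytes land in the upper half of the u32 load, hence the
 * ntohs((qmap_hdr >> 16) & 0xffff) above.
 */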
2793
2794static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys)
2795{
2796 struct ipa_a5_mux_hdr *mux_hdr;
2797 unsigned int pull_len;
2798 unsigned int padding;
2799 struct ipa_ep_context *ep;
2800 unsigned int src_pipe;
2801
2802 mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
2803
2804 src_pipe = mux_hdr->src_pipe_index;
2805
2806 IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
2807 rx_skb->len, ntohs(mux_hdr->interface_id),
2808 src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));
2809
2810 IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
2811
2812 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2813 IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
2814
2815 /*
2816 * Any packets arriving over AMPDU_TX should be dispatched
2817 * to the regular WLAN RX data-path.
2818 */
2819 if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
2820 src_pipe = WLAN_PROD_TX_EP;
2821
2822 ep = &ipa_ctx->ep[src_pipe];
2823 spin_lock(&ipa_ctx->disconnect_lock);
2824 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2825 !ep->valid || !ep->client_notify)) {
2826 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2827 src_pipe, ep->valid, ep->client_notify);
2828 dev_kfree_skb_any(rx_skb);
2829 spin_unlock(&ipa_ctx->disconnect_lock);
2830 return 0;
2831 }
2832
2833 pull_len = sizeof(struct ipa_a5_mux_hdr);
2834
2835 /*
2836 * IP packet starts on word boundary
2837 * remove the MUX header and any padding and pass the frame to
2838 * the client which registered a rx callback on the "src pipe"
2839 */
2840 padding = ep->cfg.hdr.hdr_len & 0x3;
2841 if (padding)
2842 pull_len += 4 - padding;
2843
2844 IPADBG("pulling %d bytes from skb\n", pull_len);
2845 skb_pull(rx_skb, pull_len);
2846 ep->client_notify(ep->priv, IPA_RECEIVE,
2847 (unsigned long)(rx_skb));
2848 spin_unlock(&ipa_ctx->disconnect_lock);
2849 return 0;
2850}
2851
2852static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags)
2853{
2854 return __dev_alloc_skb(len, flags);
2855}
2856
2857static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len,
2858 gfp_t flags)
2859{
2860 struct sk_buff *skb;
2861
2862 skb = __dev_alloc_skb(len + IPA_HEADROOM, flags);
2863 if (skb)
2864 skb_reserve(skb, IPA_HEADROOM);
2865
2866 return skb;
2867}
2868
2869static void ipa_free_skb_rx(struct sk_buff *skb)
2870{
2871 dev_kfree_skb_any(skb);
2872}
2873
2874void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
2875{
2876 struct sk_buff *rx_skb = (struct sk_buff *)data;
2877 struct ipa_hw_pkt_status *status;
2878 struct ipa_ep_context *ep;
2879 unsigned int src_pipe;
2880 u32 metadata;
2881
2882 status = (struct ipa_hw_pkt_status *)rx_skb->data;
2883 src_pipe = status->endp_src_idx;
2884 metadata = status->metadata;
2885 ep = &ipa_ctx->ep[src_pipe];
2886 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2887 !ep->valid ||
2888 !ep->client_notify)) {
2889 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2890 src_pipe, ep->valid, ep->client_notify);
2891 dev_kfree_skb_any(rx_skb);
2892 return;
2893 }
2894 if (!status->exception)
2895 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE +
2896 IPA_LAN_RX_HEADER_LENGTH);
2897 else
2898 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE);
2899
2900 /*
2901 * Metadata Info
2902 * ------------------------------------------
2903 * | 3 | 2 | 1 | 0 |
2904 * | fw_desc | vdev_id | qmap mux id | Resv |
2905 * ------------------------------------------
2906 */
2907 *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
2908 IPADBG("meta_data: 0x%x cb: 0x%x\n",
2909 metadata, *(u32 *)rx_skb->cb);
2910
2911 ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
2912}
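
/*
 * Control-buffer usage sketch (hypothetical client code): assuming the
 * metadata layout in the diagram above, a LAN client can recover the
 * vdev id and fw descriptor from the two bytes stashed in skb->cb:
 *
 *	u16 meta = *(u16 *)rx_skb->cb;
 *	u8 vdev_id = meta & 0xFF;
 *	u8 fw_desc = meta >> 8;
 */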
2913
2914void ipa2_recycle_wan_skb(struct sk_buff *skb)
2915{
2916 struct ipa_rx_pkt_wrapper *rx_pkt;
2917 int ep_idx = ipa2_get_ep_mapping(
2918 IPA_CLIENT_APPS_WAN_CONS);
2919 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2920 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2921
2922 if (unlikely(ep_idx == -1)) {
2923 IPAERR("dest EP does not exist\n");
2924 ipa_assert();
2925 }
2926
2927 rx_pkt = kmem_cache_zalloc(
2928 ipa_ctx->rx_pkt_wrapper_cache, flag);
2929 if (!rx_pkt)
2930 ipa_assert();
2931
2932 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2933 rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
2934
2935 rx_pkt->data.skb = skb;
2936 rx_pkt->data.dma_addr = 0;
2937 ipa_skb_recycle(rx_pkt->data.skb);
2938 skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
2939 INIT_LIST_HEAD(&rx_pkt->link);
2940 spin_lock_bh(&rx_pkt->sys->spinlock);
2941 list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
2942 spin_unlock_bh(&rx_pkt->sys->spinlock);
2943}
2944
2945static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2946{
2947 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2948 struct sk_buff *rx_skb;
2949
2950 if (unlikely(list_empty(&sys->head_desc_list))) {
2951 WARN_ON(1);
2952 return;
2953 }
2954 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2955 struct ipa_rx_pkt_wrapper,
2956 link);
2957 list_del(&rx_pkt_expected->link);
2958 sys->len--;
2959 if (size)
2960 rx_pkt_expected->len = size;
2961 rx_skb = rx_pkt_expected->data.skb;
2962 dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
2963 sys->rx_buff_sz, DMA_FROM_DEVICE);
2964 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2965 rx_skb->len = rx_pkt_expected->len;
2966 *(unsigned int *)rx_skb->cb = rx_skb->len;
2967 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2968 sys->pyld_hdlr(rx_skb, sys);
2969 sys->repl_hdlr(sys);
2970 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
2971
2972}
2973
2974static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2975{
2976 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2977 struct sk_buff *rx_skb;
2978
2979 if (unlikely(list_empty(&sys->head_desc_list))) {
2980 WARN_ON(1);
2981 return;
2982 }
2983 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2984 struct ipa_rx_pkt_wrapper,
2985 link);
2986 list_del(&rx_pkt_expected->link);
2987 sys->len--;
2988
2989 if (size)
2990 rx_pkt_expected->len = size;
2991
2992 rx_skb = rx_pkt_expected->data.skb;
2993 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2994 rx_skb->len = rx_pkt_expected->len;
2995 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2996 sys->ep->wstats.tx_pkts_rcvd++;
2997 if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
2998 ipa2_free_skb(&rx_pkt_expected->data);
2999 sys->ep->wstats.tx_pkts_dropped++;
3000 } else {
3001 sys->ep->wstats.tx_pkts_sent++;
3002 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3003 (unsigned long)(&rx_pkt_expected->data));
3004 }
3005 ipa_replenish_wlan_rx_cache(sys);
3006}
3007
3008static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
3009 struct sps_iovec *iovec)
3010{
3011 IPADBG("ENTER.\n");
3012 if (unlikely(list_empty(&sys->head_desc_list))) {
3013 IPAERR("descriptor list is empty!\n");
3014 WARN_ON(1);
3015 return;
3016 }
3017 if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) {
3018 IPAERR("received unexpected event. sps flag is 0x%x\n"
3019 , iovec->flags);
3020 WARN_ON(1);
3021 return;
3022 }
3023 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3024 (unsigned long)(iovec));
3025 IPADBG("EXIT\n");
3026}
3027
3028static void ipa_wq_rx_avail(struct work_struct *work)
3029{
3030 struct ipa_rx_pkt_wrapper *rx_pkt;
3031 struct ipa_sys_context *sys;
3032
3033 rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work);
3034 if (unlikely(rx_pkt == NULL))
3035 WARN_ON(1);
3036 sys = rx_pkt->sys;
3037 ipa_wq_rx_common(sys, 0);
3038}
3039
3040/**
3041 * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
3042 * the SPS driver after a Rx operation is complete.
3043 * Called in an interrupt context.
3044 * @notify: SPS driver supplied notification struct
3045 *
 * This function defers the work for this event to a workqueue.
3047 */
3048void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
3049{
3050 struct ipa_rx_pkt_wrapper *rx_pkt;
3051
3052 switch (notify->event_id) {
3053 case SPS_EVENT_EOT:
3054 rx_pkt = notify->data.transfer.user;
3055 if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
3056 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
3057 rx_pkt->len = notify->data.transfer.iovec.size;
3058 IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
3059 notify->user, rx_pkt->len);
3060 queue_work(rx_pkt->sys->wq, &rx_pkt->work);
3061 break;
3062 default:
3063 IPAERR("received unexpected event id %d sys=%p\n",
3064 notify->event_id, notify->user);
3065 }
3066}
3067
3068static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
3069 struct ipa_sys_context *sys)
3070{
3071 if (sys->ep->client_notify) {
3072 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3073 (unsigned long)(rx_skb));
3074 } else {
3075 dev_kfree_skb_any(rx_skb);
3076 WARN_ON(1);
3077 }
3078
3079 return 0;
3080}
3081
3082static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
3083 struct ipa_sys_context *sys)
3084{
3085 unsigned long int aggr_byte_limit;
3086
3087 sys->ep->status.status_en = true;
3088 sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
3089 if (IPA_CLIENT_IS_PROD(in->client)) {
3090 if (!sys->ep->skip_ep_cfg) {
3091 sys->policy = IPA_POLICY_NOINTR_MODE;
3092 sys->sps_option = SPS_O_AUTO_ENABLE;
3093 sys->sps_callback = NULL;
3094 sys->ep->status.status_ep = ipa2_get_ep_mapping(
3095 IPA_CLIENT_APPS_LAN_CONS);
3096 if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
3097 sys->ep->status.status_en = false;
3098 } else {
3099 sys->policy = IPA_POLICY_INTR_MODE;
3100 sys->sps_option = (SPS_O_AUTO_ENABLE |
3101 SPS_O_EOT);
3102 sys->sps_callback =
3103 ipa_sps_irq_tx_no_aggr_notify;
3104 }
3105 return 0;
3106 }
3107
3108 aggr_byte_limit =
3109 (unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
3110 ipa_adjust_ra_buff_base_sz(
3111 in->ipa_ep_cfg.aggr.aggr_byte_limit));
3112
3113 if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
3114 in->client == IPA_CLIENT_APPS_WAN_CONS) {
3115 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3116 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3117 | SPS_O_ACK_TRANSFERS);
3118 sys->sps_callback = ipa_sps_irq_rx_notify;
3119 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3120 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3121 switch_to_intr_rx_work_func);
3122 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3123 replenish_rx_work_func);
3124 INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
3125 atomic_set(&sys->curr_polling_state, 0);
3126 sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
3127 IPA_GENERIC_RX_BUFF_BASE_SZ) -
3128 IPA_HEADROOM;
3129 sys->get_skb = ipa_get_skb_ipa_rx_headroom;
3130 sys->free_skb = ipa_free_skb_rx;
3131 in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
3132 in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
3133 in->ipa_ep_cfg.aggr.aggr_time_limit =
3134 IPA_GENERIC_AGGR_TIME_LIMIT;
3135 if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
			sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
			/* set the pool size before deriving the
			 * replenish trigger threshold from it
			 */
			sys->rx_pool_sz =
				ipa_ctx->lan_rx_ring_size;
			if (nr_cpu_ids > 1) {
				sys->repl_hdlr =
					ipa_fast_replenish_rx_cache;
				sys->repl_trig_thresh =
					sys->rx_pool_sz / 8;
			} else {
				sys->repl_hdlr =
					ipa_replenish_rx_cache;
			}
3148 in->ipa_ep_cfg.aggr.aggr_byte_limit =
3149 IPA_GENERIC_AGGR_BYTE_LIMIT;
3150 in->ipa_ep_cfg.aggr.aggr_pkt_limit =
3151 IPA_GENERIC_AGGR_PKT_LIMIT;
3152 sys->ep->wakelock_client =
3153 IPA_WAKELOCK_REF_CLIENT_LAN_RX;
3154 } else if (in->client ==
3155 IPA_CLIENT_APPS_WAN_CONS) {
3156 sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
			if (in->napi_enabled) {
				sys->repl_hdlr =
					ipa_replenish_rx_cache_recycle;
				sys->rx_pool_sz =
					IPA_WAN_NAPI_CONS_RX_POOL_SZ;
			} else {
				/* set the pool size before deriving the
				 * replenish trigger threshold from it
				 */
				sys->rx_pool_sz =
					ipa_ctx->wan_rx_ring_size;
				if (nr_cpu_ids > 1) {
					sys->repl_hdlr =
						ipa_fast_replenish_rx_cache;
					sys->repl_trig_thresh =
						sys->rx_pool_sz / 8;
				} else {
					sys->repl_hdlr =
						ipa_replenish_rx_cache;
				}
			}
3175 sys->ep->wakelock_client =
3176 IPA_WAKELOCK_REF_CLIENT_WAN_RX;
3177 in->ipa_ep_cfg.aggr.aggr_sw_eof_active
3178 = true;
3179 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
3180 IPAERR("get close-by %u\n",
3181 ipa_adjust_ra_buff_base_sz(
3182 in->ipa_ep_cfg.aggr.
3183 aggr_byte_limit));
3184 IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
3185 /* disable ipa_status */
3186 sys->ep->status.
3187 status_en = false;
3188 sys->rx_buff_sz =
3189 IPA_GENERIC_RX_BUFF_SZ(
3190 ipa_adjust_ra_buff_base_sz(
3191 in->ipa_ep_cfg.aggr.
3192 aggr_byte_limit - IPA_HEADROOM));
3193 in->ipa_ep_cfg.aggr.
3194 aggr_byte_limit =
3195 sys->rx_buff_sz < in->
3196 ipa_ep_cfg.aggr.aggr_byte_limit ?
3197 IPA_ADJUST_AGGR_BYTE_LIMIT(
3198 sys->rx_buff_sz) :
3199 IPA_ADJUST_AGGR_BYTE_LIMIT(
3200 in->ipa_ep_cfg.
3201 aggr.aggr_byte_limit);
3202 IPAERR("set aggr_limit %lu\n",
3203 (unsigned long int)
3204 in->ipa_ep_cfg.aggr.
3205 aggr_byte_limit);
3206 } else {
3207 in->ipa_ep_cfg.aggr.
3208 aggr_byte_limit =
3209 IPA_GENERIC_AGGR_BYTE_LIMIT;
3210 in->ipa_ep_cfg.aggr.
3211 aggr_pkt_limit =
3212 IPA_GENERIC_AGGR_PKT_LIMIT;
3213 }
3214 }
3215 } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
3216 IPADBG("assigning policy to client:%d",
3217 in->client);
3218
3219 sys->ep->status.status_en = false;
3220 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3221 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3222 | SPS_O_ACK_TRANSFERS);
3223 sys->sps_callback = ipa_sps_irq_rx_notify;
3224 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3225 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3226 switch_to_intr_rx_work_func);
3227 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3228 replenish_rx_work_func);
3229 atomic_set(&sys->curr_polling_state, 0);
3230 sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
3231 sys->rx_pool_sz = in->desc_fifo_sz /
3232 sizeof(struct sps_iovec) - 1;
3233 if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
3234 sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
3235 sys->pyld_hdlr = NULL;
3236 sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
3237 sys->get_skb = ipa_get_skb_ipa_rx;
3238 sys->free_skb = ipa_free_skb_rx;
3239 in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
3240 sys->ep->wakelock_client =
3241 IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
3242 } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
3243 IPADBG("assigning policy to client:%d",
3244 in->client);
3245
3246 sys->ep->status.status_en = false;
3247 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3248 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3249 | SPS_O_ACK_TRANSFERS);
3250 sys->sps_callback = ipa_sps_irq_rx_notify;
3251 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3252 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3253 switch_to_intr_rx_work_func);
3254 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3255 replenish_rx_work_func);
3256 atomic_set(&sys->curr_polling_state, 0);
3257 sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
3258 sys->rx_pool_sz = in->desc_fifo_sz /
3259 sizeof(struct sps_iovec) - 1;
3260 if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
3261 sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
3262 sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
3263 sys->get_skb = ipa_get_skb_ipa_rx;
3264 sys->free_skb = ipa_free_skb_rx;
3265 sys->repl_hdlr = ipa_replenish_rx_cache;
3266 sys->ep->wakelock_client =
3267 IPA_WAKELOCK_REF_CLIENT_ODU_RX;
3268 } else if (in->client ==
3269 IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
3270 IPADBG("assigning policy to client:%d",
3271 in->client);
3272 sys->ep->status.status_en = false;
3273 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3274 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3275 | SPS_O_ACK_TRANSFERS);
3276 sys->sps_callback = ipa_sps_irq_rx_notify;
3277 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3278 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3279 switch_to_intr_rx_work_func);
3280 } else if (in->client ==
3281 IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
3282 IPADBG("assigning policy to client:%d",
3283 in->client);
3284 sys->ep->status.status_en = false;
3285 sys->policy = IPA_POLICY_NOINTR_MODE;
3286 sys->sps_option = SPS_O_AUTO_ENABLE |
3287 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
3288 } else {
3289 IPAERR("Need to install a RX pipe hdlr\n");
3290 WARN_ON(1);
3291 return -EINVAL;
3292 }
3293 return 0;
3294}
3295
3296static int ipa_assign_policy(struct ipa_sys_connect_params *in,
3297 struct ipa_sys_context *sys)
3298{
3299 if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
3300 sys->policy = IPA_POLICY_INTR_MODE;
3301 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3302 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3303 return 0;
3304 }
3305
3306 if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
3307 if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) {
3308 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3309 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3310 SPS_O_ACK_TRANSFERS);
3311 sys->sps_callback = ipa_sps_irq_tx_notify;
3312 INIT_WORK(&sys->work, ipa_wq_handle_tx);
3313 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3314 switch_to_intr_tx_work_func);
3315 atomic_set(&sys->curr_polling_state, 0);
3316 } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3317 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3318 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3319 SPS_O_ACK_TRANSFERS);
3320 sys->sps_callback = ipa_sps_irq_rx_notify;
3321 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3322 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3323 switch_to_intr_rx_work_func);
3324 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3325 replenish_rx_work_func);
3326 atomic_set(&sys->curr_polling_state, 0);
3327 sys->rx_buff_sz = IPA_RX_SKB_SIZE;
3328 sys->rx_pool_sz = IPA_RX_POOL_CEIL;
3329 sys->pyld_hdlr = ipa_rx_pyld_hdlr;
3330 sys->get_skb = ipa_get_skb_ipa_rx;
3331 sys->free_skb = ipa_free_skb_rx;
3332 sys->repl_hdlr = ipa_replenish_rx_cache;
3333 } else if (IPA_CLIENT_IS_PROD(in->client)) {
3334 sys->policy = IPA_POLICY_INTR_MODE;
3335 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3336 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3337 } else {
3338 IPAERR("Need to install a RX pipe hdlr\n");
3339 WARN_ON(1);
3340 return -EINVAL;
3341 }
3342
3343 return 0;
3344 } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3345 return ipa_assign_policy_v2(in, sys);
3346
3347 IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
3348 WARN_ON(1);
3349 return -EINVAL;
3350}
3351
3352/**
 * ipa_tx_client_rx_notify_release() - Callback function which
 * replenishes the available FIFO descriptor count and calls the
 * user supplied client notify callback so that the skb can be
 * released
 *
 * @user1: [in] - Data Descriptor
 * @user2: [in] - endpoint idx
 *
 * This notify callback is for the destination client.
 * It is supplied as the last descriptor's callback in ipa2_tx_dp_mul().
3363 */
3364static void ipa_tx_client_rx_notify_release(void *user1, int user2)
3365{
3366 struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
3367 int ep_idx = user2;
3368
3369 IPADBG("Received data desc anchor:%p\n", dd);
3370
3371 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3372 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3373
3374 /* wlan host driver waits till tx complete before unload */
3375 IPADBG("ep=%d fifo_desc_free_count=%d\n",
3376 ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc));
3377 IPADBG("calling client notify callback with priv:%p\n",
3378 ipa_ctx->ep[ep_idx].priv);
3379
3380 if (ipa_ctx->ep[ep_idx].client_notify) {
3381 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
3382 IPA_WRITE_DONE, (unsigned long)user1);
3383 ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++;
3384 }
3385}
3386/**
 * ipa_tx_client_rx_pkt_status() - Callback function which
 * increments the available FIFO descriptor count and the
 * endpoint's status-received counter
 *
 * @user1: [in] - Data Descriptor
 * @user2: [in] - endpoint idx
 *
 * This notify callback is for the destination client.
 * It is supplied for all but the last descriptor in ipa2_tx_dp_mul().
3396 */
3397static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
3398{
3399 int ep_idx = user2;
3400
3401 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3402 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3403}
3404
3405
3406/**
3407 * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
3408 * @src: [in] - Client that is sending data
 * @data_desc: [in] - data descriptors from WLAN
 *
 * This is used to transfer data descriptors received from the
 * WLAN1_PROD pipe to the IPA HW.
 *
 * The function sends the data descriptors from WLAN1_PROD one at a
 * time using sps_transfer_one() and sets the EOT flag for the last
 * descriptor. Once the send is done from the SPS point of view, the
 * IPA driver is notified through the supplied callback -
 * ipa_sps_irq_tx_no_aggr_notify()
 *
 * ipa_sps_irq_tx_no_aggr_notify() will call the user supplied
 * callback (from ipa_connect)
3422 *
3423 * Returns: 0 on success, negative on failure
3424 */
3425int ipa2_tx_dp_mul(enum ipa_client_type src,
3426 struct ipa_tx_data_desc *data_desc)
3427{
3428 /* The second byte in wlan header holds qmap id */
3429#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
3430 struct ipa_tx_data_desc *entry;
3431 struct ipa_sys_context *sys;
3432 struct ipa_desc desc = { 0 };
3433 u32 num_desc, cnt;
3434 int ep_idx;
3435
3436 if (unlikely(!ipa_ctx)) {
3437 IPAERR("IPA driver was not initialized\n");
3438 return -EINVAL;
3439 }
3440
3441 IPADBG("Received data desc anchor:%p\n", data_desc);
3442
3443 spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3444
3445 ep_idx = ipa2_get_ep_mapping(src);
3446 if (unlikely(ep_idx == -1)) {
3447 IPAERR("dest EP does not exist.\n");
3448 goto fail_send;
3449 }
3450 IPADBG("ep idx:%d\n", ep_idx);
3451 sys = ipa_ctx->ep[ep_idx].sys;
3452
3453 if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
3454 IPAERR("dest EP not valid.\n");
3455 goto fail_send;
3456 }
3457 sys->ep->wstats.rx_hd_rcvd++;
3458
3459 /* Calculate the number of descriptors */
3460 num_desc = 0;
3461 list_for_each_entry(entry, &data_desc->link, link) {
3462 num_desc++;
3463 }
3464 IPADBG("Number of Data Descriptors:%d", num_desc);
3465
3466 if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
3467 IPAERR("Insufficient data descriptors available\n");
3468 goto fail_send;
3469 }
3470
3471 /* Assign callback only for last data descriptor */
3472 cnt = 0;
3473 list_for_each_entry(entry, &data_desc->link, link) {
3474 IPADBG("Parsing data desc :%d\n", cnt);
3475 cnt++;
3476 ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
3477 (u8)sys->ep->cfg.meta.qmap_id;
3478 desc.pyld = entry->pyld_buffer;
3479 desc.len = entry->pyld_len;
3480 desc.type = IPA_DATA_DESC_SKB;
3481 desc.user1 = data_desc;
3482 desc.user2 = ep_idx;
3483 IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
3484 entry->priv, desc.pyld, desc.len);
3485
3486 /* In case of last descriptor populate callback */
3487 if (cnt == num_desc) {
3488 IPADBG("data desc:%p\n", data_desc);
3489 desc.callback = ipa_tx_client_rx_notify_release;
3490 } else {
3491 desc.callback = ipa_tx_client_rx_pkt_status;
3492 }
3493
3494 IPADBG("calling ipa_send_one()\n");
3495 if (ipa_send_one(sys, &desc, true)) {
3496 IPAERR("fail to send skb\n");
3497 sys->ep->wstats.rx_pkt_leak += (cnt-1);
3498 sys->ep->wstats.rx_dp_fail++;
3499 goto fail_send;
3500 }
3501
3502 if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
3503 atomic_dec(&sys->ep->avail_fifo_desc);
3504
3505 sys->ep->wstats.rx_pkts_rcvd++;
3506 IPADBG("ep=%d fifo desc=%d\n",
3507 ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
3508 }
3509
3510 sys->ep->wstats.rx_hd_processed++;
3511 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3512 return 0;
3513
3514fail_send:
3515 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3516 return -EFAULT;
3517
3518}
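
/*
 * Usage sketch (hypothetical caller, not part of this driver): the WLAN
 * driver hands over a list of descriptors anchored by a head element
 * whose own payload is not sent:
 *
 *	struct ipa_tx_data_desc head, d1, d2;
 *
 *	INIT_LIST_HEAD(&head.link);
 *	d1.pyld_buffer = buf1; d1.pyld_len = len1;
 *	d2.pyld_buffer = buf2; d2.pyld_len = len2;
 *	list_add_tail(&d1.link, &head.link);
 *	list_add_tail(&d2.link, &head.link);
 *
 *	if (ipa2_tx_dp_mul(IPA_CLIENT_WLAN1_PROD, &head))
 *		pr_err("ipa2_tx_dp_mul failed\n");
 *
 * Only the last descriptor gets ipa_tx_client_rx_notify_release() as
 * its completion callback; the preceding ones get
 * ipa_tx_client_rx_pkt_status().
 */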
3519
3520void ipa2_free_skb(struct ipa_rx_data *data)
3521{
3522 struct ipa_rx_pkt_wrapper *rx_pkt;
3523
3524 if (unlikely(!ipa_ctx)) {
3525 IPAERR("IPA driver was not initialized\n");
3526 return;
3527 }
3528
3529 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3530
3531 ipa_ctx->wc_memb.total_tx_pkts_freed++;
3532 rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data);
3533
3534 ipa_skb_recycle(rx_pkt->data.skb);
3535 (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
3536
3537 list_add_tail(&rx_pkt->link,
3538 &ipa_ctx->wc_memb.wlan_comm_desc_list);
3539 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
3540
3541 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3542}
3543
3544
3545/* Functions added to support kernel tests */
3546
3547int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
3548 unsigned long *ipa_bam_hdl,
3549 u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
3550{
3551 struct ipa_ep_context *ep;
3552 int ipa_ep_idx;
3553 int result = -EINVAL;
3554
3555 if (sys_in == NULL || clnt_hdl == NULL) {
3556 IPAERR("NULL args\n");
3557 goto fail_gen;
3558 }
3559
3560 if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) {
3561 IPAERR("NULL args\n");
3562 goto fail_gen;
3563 }
3564 if (sys_in->client >= IPA_CLIENT_MAX) {
3565 IPAERR("bad parm client:%d\n", sys_in->client);
3566 goto fail_gen;
3567 }
3568
3569 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
3570 if (ipa_ep_idx == -1) {
3571 IPAERR("Invalid client :%d\n", sys_in->client);
3572 goto fail_gen;
3573 }
3574
3575 ep = &ipa_ctx->ep[ipa_ep_idx];
3576
3577 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
3578
3579 if (ep->valid == 1) {
3580 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
3581 IPAERR("EP %d already allocated\n", ipa_ep_idx);
3582 goto fail_and_disable_clocks;
3583 } else {
3584 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
3585 &sys_in->ipa_ep_cfg.hdr)) {
3586 IPAERR("fail to configure hdr prop of EP %d\n",
3587 ipa_ep_idx);
3588 result = -EFAULT;
3589 goto fail_and_disable_clocks;
3590 }
3591 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
3592 &sys_in->ipa_ep_cfg.cfg)) {
3593 IPAERR("fail to configure cfg prop of EP %d\n",
3594 ipa_ep_idx);
3595 result = -EFAULT;
3596 goto fail_and_disable_clocks;
3597 }
3598 IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
3599 sys_in->client, ipa_ep_idx, ep->sys);
3600 ep->client_notify = sys_in->notify;
3601 ep->priv = sys_in->priv;
3602 *clnt_hdl = ipa_ep_idx;
3603 if (!ep->keep_ipa_awake)
3604 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3605
3606 return 0;
3607 }
3608 }
3609
3610 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
3611
3612 ep->valid = 1;
3613 ep->client = sys_in->client;
3614 ep->client_notify = sys_in->notify;
3615 ep->priv = sys_in->priv;
3616 ep->keep_ipa_awake = true;
3617
3618 result = ipa_enable_data_path(ipa_ep_idx);
3619 if (result) {
3620 IPAERR("enable data path failed res=%d clnt=%d.\n",
3621 result, ipa_ep_idx);
3622 goto fail_gen2;
3623 }
3624
3625 if (!ep->skip_ep_cfg) {
3626 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
3627 IPAERR("fail to configure EP.\n");
3628 goto fail_gen2;
3629 }
3630 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
3631 IPAERR("fail to configure status of EP.\n");
3632 goto fail_gen2;
3633 }
3634 IPADBG("ep configuration successful\n");
3635 } else {
3636 IPADBG("skipping ep configuration\n");
3637 }
3638
3639 *clnt_hdl = ipa_ep_idx;
3640
3641 *ipa_pipe_num = ipa_ep_idx;
3642 *ipa_bam_hdl = ipa_ctx->bam_handle;
3643
3644 if (!ep->keep_ipa_awake)
3645 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3646
3647 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
3648 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
3649 ipa_ep_idx, ep->sys);
3650
3651 return 0;
3652
3653fail_gen2:
3654fail_and_disable_clocks:
3655 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3656fail_gen:
3657 return result;
3658}
3659
3660int ipa2_sys_teardown(u32 clnt_hdl)
3661{
3662 struct ipa_ep_context *ep;
3663
3664 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3665 ipa_ctx->ep[clnt_hdl].valid == 0) {
3666 IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
3667 return -EINVAL;
3668 }
3669
3670 ep = &ipa_ctx->ep[clnt_hdl];
3671
3672 if (!ep->keep_ipa_awake)
3673 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3674
3675 ipa_disable_data_path(clnt_hdl);
3676 ep->valid = 0;
3677
3678 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3679
3680 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
3681
3682 return 0;
3683}
3684
3685int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
3686 unsigned long gsi_ev_hdl)
3687{
	IPAERR("GSI is not supported in IPAv2\n");
3689 return -EFAULT;
3690}
3691
3692
3693/**
3694 * ipa_adjust_ra_buff_base_sz()
3695 *
 * Return value: the largest power of two which is smaller than the
 * input value plus IPA_MTU and IPA_GENERIC_RX_BUFF_LIMIT
3698 */
3699static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
3700{
3701 aggr_byte_limit += IPA_MTU;
3702 aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
3703 aggr_byte_limit--;
3704 aggr_byte_limit |= aggr_byte_limit >> 1;
3705 aggr_byte_limit |= aggr_byte_limit >> 2;
3706 aggr_byte_limit |= aggr_byte_limit >> 4;
3707 aggr_byte_limit |= aggr_byte_limit >> 8;
3708 aggr_byte_limit |= aggr_byte_limit >> 16;
3709 aggr_byte_limit++;
3710 return aggr_byte_limit >> 1;
3711}
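
/*
 * Worked example (illustrative, hypothetical numbers): if the sum
 * aggr_byte_limit + IPA_MTU + IPA_GENERIC_RX_BUFF_LIMIT comes to
 * 20000, the decrement and OR-folding above turn it into 32767, the
 * increment yields 32768 (the next power of two), and the final right
 * shift returns 16384 - the largest power of two below the sum.
 */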