/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include "ipa_i.h"
#include "ipa_trace.h"

#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500
/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_MTU 1500
#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
#define IPA_GENERIC_AGGR_TIME_LIMIT 1
#define IPA_GENERIC_AGGR_PKT_LIMIT 0

#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
		(X) + NET_SKB_PAD) +\
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
#define IPA_GENERIC_RX_BUFF_LIMIT (\
		IPA_REAL_GENERIC_RX_BUFF_SZ(\
		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
		IPA_GENERIC_RX_BUFF_BASE_SZ)
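
/*
 * A worked example of the buffer-size math above (illustrative only):
 * for the 8192-byte base allocation, IPA_REAL_GENERIC_RX_BUFF_SZ adds
 * NET_SKB_PAD plus the skb_shared_info overhead, so the real allocation
 * exceeds 8192 bytes. IPA_GENERIC_RX_BUFF_SZ(8192) shrinks the payload
 * request by exactly that overage so the real allocation stays within
 * the 8K slab, and IPA_GENERIC_RX_BUFF_LIMIT is the resulting
 * per-buffer overhead. The concrete numbers depend on the
 * architecture's SMP_CACHE_BYTES and NET_SKB_PAD values.
 */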

#define IPA_RX_BUFF_CLIENT_HEADROOM 256

/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
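
/*
 * For example (assuming the 8192-byte base buffer above):
 * IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000 = 6, i.e. the
 * aggregation byte limit is programmed in whole KB with one MTU of
 * slack, which matches IPA_GENERIC_AGGR_BYTE_LIMIT.
 */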

#define IPA_WLAN_RX_POOL_SZ 100
#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
#define IPA_WLAN_RX_BUFF_SZ 2048
#define IPA_WLAN_COMM_RX_POOL_LOW 100
#define IPA_WLAN_COMM_RX_POOL_HIGH 900

#define IPA_ODU_RX_BUFF_SZ 2048
#define IPA_ODU_RX_POOL_SZ 32
#define IPA_SIZE_DL_CSUM_META_TRAILER 8

#define IPA_HEADROOM 128

static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys);
static void ipa_replenish_rx_cache(struct ipa_sys_context *sys);
static void replenish_rx_work_func(struct work_struct *work);
static void ipa_wq_handle_rx(struct work_struct *work);
static void ipa_wq_handle_tx(struct work_struct *work);
static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size);
static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys,
		u32 size);
static int ipa_assign_policy(struct ipa_sys_connect_params *in,
		struct ipa_sys_context *sys);
static void ipa_cleanup_rx(struct ipa_sys_context *sys);
static void ipa_wq_rx_avail(struct work_struct *work);
static void ipa_alloc_wlan_rx_common_cache(u32 size);
static void ipa_cleanup_wlan_rx_common_cache(void);
static void ipa_wq_repl_rx(struct work_struct *work);
static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
		struct sps_iovec *iovec);

static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);

static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
{
	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
	int i;

	for (i = 0; i < cnt; i++) {
		spin_lock_bh(&sys->spinlock);
		if (unlikely(list_empty(&sys->head_desc_list))) {
			spin_unlock_bh(&sys->spinlock);
			return;
		}
		tx_pkt_expected = list_first_entry(&sys->head_desc_list,
						   struct ipa_tx_pkt_wrapper,
						   link);
		list_del(&tx_pkt_expected->link);
		sys->len--;
		spin_unlock_bh(&sys->spinlock);
		if (!tx_pkt_expected->no_unmap_dma) {
			if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa_ctx->pdev,
						tx_pkt_expected->mem.phys_base,
						tx_pkt_expected->mem.size,
						DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa_ctx->pdev,
						tx_pkt_expected->mem.phys_base,
						tx_pkt_expected->mem.size,
						DMA_TO_DEVICE);
			}
		}
		if (tx_pkt_expected->callback)
			tx_pkt_expected->callback(tx_pkt_expected->user1,
					tx_pkt_expected->user2);
		if (tx_pkt_expected->cnt > 1 &&
				tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) {
			if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) {
				dma_pool_free(ipa_ctx->dma_pool,
						tx_pkt_expected->mult.base,
						tx_pkt_expected->mult.phys_base);
			} else {
				dma_unmap_single(ipa_ctx->pdev,
						tx_pkt_expected->mult.phys_base,
						tx_pkt_expected->mult.size,
						DMA_TO_DEVICE);
				kfree(tx_pkt_expected->mult.base);
			}
		}
		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected);
	}
}

static void ipa_wq_write_done_status(int src_pipe)
{
	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
	struct ipa_sys_context *sys;
	u32 cnt;

	WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes);

	if (!ipa_ctx->ep[src_pipe].status.status_en)
		return;

	sys = ipa_ctx->ep[src_pipe].sys;
	if (!sys)
		return;

	spin_lock_bh(&sys->spinlock);
	if (unlikely(list_empty(&sys->head_desc_list))) {
		spin_unlock_bh(&sys->spinlock);
		return;
	}
	tx_pkt_expected = list_first_entry(&sys->head_desc_list,
					   struct ipa_tx_pkt_wrapper,
					   link);
	cnt = tx_pkt_expected->cnt;
	spin_unlock_bh(&sys->spinlock);
	ipa_wq_write_done_common(sys, cnt);
}

/**
 * ipa_wq_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work: work_struct used by the work queue
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that
 *   the order for sent packets is the same as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 * - return the tx buffer back to dma_pool
 */
static void ipa_wq_write_done(struct work_struct *work)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	u32 cnt;
	struct ipa_sys_context *sys;

	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
	cnt = tx_pkt->cnt;
	sys = tx_pkt->sys;

	ipa_wq_write_done_common(sys, cnt);
}

static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
		bool in_poll_state)
{
	struct sps_iovec iov;
	int ret;
	int cnt = 0;

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
			!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;
		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
		if (ret) {
			IPAERR("sps_get_iovec failed %d\n", ret);
			break;
		}

		if (iov.addr == 0)
			break;

		ipa_wq_write_done_common(sys, 1);
		cnt++;
	}

	return cnt;
}

/**
 * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
 */
static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
{
	int ret;

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}

	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_get_config() failed %d\n", ret);
		goto fail;
	}
	sys->event.options = SPS_O_EOT;
	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
	if (ret) {
		IPAERR("sps_register_event() failed %d\n", ret);
		goto fail;
	}
	sys->ep->connect.options =
		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_set_config() failed %d\n", ret);
		goto fail;
	}
	atomic_set(&sys->curr_polling_state, 0);
	ipa_handle_tx_core(sys, true, false);
	return;

fail:
	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
			msecs_to_jiffies(1));
}

static void ipa_handle_tx(struct ipa_sys_context *sys)
{
	int inactive_cycles = 0;
	int cnt;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	do {
		cnt = ipa_handle_tx_core(sys, true, true);
		if (cnt == 0) {
			inactive_cycles++;
			usleep_range(POLLING_MIN_SLEEP_TX,
					POLLING_MAX_SLEEP_TX);
		} else {
			inactive_cycles = 0;
		}
	} while (inactive_cycles <= POLLING_INACTIVITY_TX);

	ipa_tx_switch_to_intr_mode(sys);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}

static void ipa_wq_handle_tx(struct work_struct *work)
{
	struct ipa_sys_context *sys;

	sys = container_of(work, struct ipa_sys_context, work);

	ipa_handle_tx(sys);
}

/**
 * ipa_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether caller is in atomic context
 *
 * - Allocate tx_packet wrapper
 * - transfer data to the IPA
 * - after the transfer was done the SPS will
 *   notify the sending user via ipa_sps_irq_comp_tx()
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
		bool in_atomic)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	int result;
	u16 sps_flags = SPS_IOVEC_FLAG_EOT;
	dma_addr_t dma_address;
	u16 len;
	u32 mem_flag = GFP_ATOMIC;
	struct sps_iovec iov;
	int ret;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
	if (!tx_pkt) {
		IPAERR("failed to alloc tx wrapper\n");
		goto fail_mem_alloc;
	}

	if (!desc->dma_address_valid) {
		dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
				desc->len, DMA_TO_DEVICE);
	} else {
		dma_address = desc->dma_address;
		tx_pkt->no_unmap_dma = true;
	}
	if (!dma_address) {
		IPAERR("failed to DMA wrap\n");
		goto fail_dma_map;
	}

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->type = desc->type;
	tx_pkt->cnt = 1; /* only 1 desc in this "set" */

	tx_pkt->mem.phys_base = dma_address;
	tx_pkt->mem.base = desc->pyld;
	tx_pkt->mem.size = desc->len;
	tx_pkt->sys = sys;
	tx_pkt->callback = desc->callback;
	tx_pkt->user1 = desc->user1;
	tx_pkt->user2 = desc->user2;

	/*
	 * Special treatment for immediate commands, where the structure of the
	 * descriptor is different
	 */
	if (desc->type == IPA_IMM_CMD_DESC) {
		sps_flags |= SPS_IOVEC_FLAG_IMME;
		len = desc->opcode;
		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
				desc->opcode, desc->len, sps_flags);
		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
	} else {
		len = desc->len;
	}

	INIT_WORK(&tx_pkt->work, ipa_wq_write_done);

	spin_lock_bh(&sys->spinlock);
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	if (sys->policy == IPA_POLICY_NOINTR_MODE) {
		do {
			ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
			if (ret) {
				IPADBG("sps_get_iovec failed %d\n", ret);
				break;
			}
			if ((iov.addr == 0x0) && (iov.size == 0x0))
				break;
		} while (1);
	}
	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
			sps_flags);
	if (result) {
		IPAERR("sps_transfer_one failed rc=%d\n", result);
		goto fail_sps_send;
	}

	spin_unlock_bh(&sys->spinlock);

	return 0;

fail_sps_send:
	list_del(&tx_pkt->link);
	spin_unlock_bh(&sys->spinlock);
	dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
fail_dma_map:
	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
fail_mem_alloc:
	return -EFAULT;
}
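
/*
 * Example (hypothetical caller, for illustration only - the callback name
 * and skb source are assumptions, not part of this driver): sending one
 * data descriptor on a system pipe:
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.pyld = skb->data;
 *	desc.len = skb_headlen(skb);
 *	desc.type = IPA_DATA_DESC_SKB;
 *	desc.callback = my_tx_complete;
 *	desc.user1 = skb;
 *	if (ipa_send_one(sys, &desc, true))
 *		IPAERR("send failed\n");
 */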

/**
 * ipa_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether caller is in atomic context
 *
 * This function is used for system-to-bam connection.
 * - SPS driver expects a struct sps_transfer which contains all the data
 *   for a transaction
 * - ipa_tx_pkt_wrapper will be used for each ipa
 *   descriptor (allocated from wrappers cache)
 * - The wrapper struct will be configured for each ipa-desc payload and will
 *   contain information which will be later used by the user callbacks
 * - each transfer will be made by calling sps_transfer()
 * - Each packet (command or data) that will be sent will also be saved in
 *   ipa_sys_context for a later check that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
		bool in_atomic)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	struct ipa_tx_pkt_wrapper *next_pkt;
	struct sps_transfer transfer = { 0 };
	struct sps_iovec *iovec;
	dma_addr_t dma_addr;
	int i = 0;
	int j;
	int result;
	int fail_dma_wrap = 0;
	uint size = num_desc * sizeof(struct sps_iovec);
	u32 mem_flag = GFP_ATOMIC;
	struct sps_iovec iov;
	int ret;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
		transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
				&dma_addr);
		if (!transfer.iovec) {
			IPAERR("fail to alloc dma mem for sps xfr buff\n");
			return -EFAULT;
		}
	} else {
		transfer.iovec = kmalloc(size, mem_flag);
		if (!transfer.iovec) {
			IPAERR("fail to alloc mem for sps xfr buff ");
			IPAERR("num_desc = %d size = %d\n", num_desc, size);
			return -EFAULT;
		}
		dma_addr = dma_map_single(ipa_ctx->pdev,
				transfer.iovec, size, DMA_TO_DEVICE);
		if (!dma_addr) {
			IPAERR("dma_map_single failed for sps xfr buff\n");
			kfree(transfer.iovec);
			return -EFAULT;
		}
	}

	transfer.iovec_phys = dma_addr;
	transfer.iovec_count = num_desc;
	spin_lock_bh(&sys->spinlock);

	for (i = 0; i < num_desc; i++) {
		fail_dma_wrap = 0;
		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
				mem_flag);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			goto failure;
		}
		/*
		 * first desc of set is "special" as it holds the count and
		 * other info
		 */
		if (i == 0) {
			transfer.user = tx_pkt;
			tx_pkt->mult.phys_base = dma_addr;
			tx_pkt->mult.base = transfer.iovec;
			tx_pkt->mult.size = size;
			tx_pkt->cnt = num_desc;
			INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
		}

		iovec = &transfer.iovec[i];
		iovec->flags = 0;

		INIT_LIST_HEAD(&tx_pkt->link);
		tx_pkt->type = desc[i].type;

		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
			tx_pkt->mem.base = desc[i].pyld;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					dma_map_single(ipa_ctx->pdev,
						tx_pkt->mem.base,
						tx_pkt->mem.size,
						DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base = desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		} else {
			tx_pkt->mem.base = desc[i].frag;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					skb_frag_dma_map(ipa_ctx->pdev,
						desc[i].frag,
						0, tx_pkt->mem.size,
						DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base = desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		}

		if (!tx_pkt->mem.phys_base) {
			IPAERR("failed to DMA map tx desc\n");
			fail_dma_wrap = 1;
			goto failure;
		}

		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;

		/*
		 * Point the iovec to the buffer and
		 * add this packet to system pipe context.
		 */
		iovec->addr = tx_pkt->mem.phys_base;
		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		/*
		 * Special treatment for immediate commands, where the structure
		 * of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			iovec->size = desc[i].opcode;
			iovec->flags |= SPS_IOVEC_FLAG_IMME;
			IPA_DUMP_BUFF(desc[i].pyld,
					tx_pkt->mem.phys_base, desc[i].len);
		} else {
			iovec->size = desc[i].len;
		}

		if (i == (num_desc - 1)) {
			iovec->flags |= SPS_IOVEC_FLAG_EOT;
			/* "mark" the last desc */
			tx_pkt->cnt = IPA_LAST_DESC_CNT;
		}
	}

	if (sys->policy == IPA_POLICY_NOINTR_MODE) {
		do {
			ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
			if (ret) {
				IPADBG("sps_get_iovec failed %d\n", ret);
				break;
			}
			if ((iov.addr == 0x0) && (iov.size == 0x0))
				break;
		} while (1);
	}
	result = sps_transfer(sys->ep->ep_hdl, &transfer);
	if (result) {
		IPAERR("sps_transfer failed rc=%d\n", result);
		goto failure;
	}

	spin_unlock_bh(&sys->spinlock);
	return 0;

failure:
	tx_pkt = transfer.user;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
			dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
		} else {
			dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
		}
		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
	if (j < num_desc)
		/* last desc failed */
		if (fail_dma_wrap)
			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
	if (transfer.iovec_phys) {
		if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
			dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
					transfer.iovec_phys);
		} else {
			dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys,
					size, DMA_TO_DEVICE);
			kfree(transfer.iovec);
		}
	}
	spin_unlock_bh(&sys->spinlock);
	return -EFAULT;
}
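
/*
 * A minimal usage sketch (illustrative only; filling desc[] is the
 * caller's responsibility, exactly as done in ipa2_tx_dp() below):
 *
 *	struct ipa_desc desc[2];
 *
 *	// desc[0]: e.g. an IPA_IMM_CMD_DESC, desc[1]: the data payload
 *	if (ipa_send(sys, 2, desc, true))
 *		IPAERR("transaction failed\n");
 */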

/**
 * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver
 * after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: ignored
 *
 * Complete the immediate command's completion object; this will release the
 * thread which waits on this completion object (ipa_send_cmd())
 */
static void ipa_sps_irq_cmd_ack(void *user1, int user2)
{
	struct ipa_desc *desc = (struct ipa_desc *)user1;

	if (!desc) {
		IPAERR("desc is NULL\n");
		WARN_ON(1);
		return;
	}
	IPADBG("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}

/**
 * ipa_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 *
 * This function blocks until the command gets an ACK from the IPA HW; the
 * caller needs to free any resources it allocated after the function returns.
 * The callback in ipa_desc should not be set by the caller
 * for this function.
 */
int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
{
	struct ipa_desc *desc;
	int result = 0;
	struct ipa_sys_context *sys;
	int ep_idx;

	IPADBG("sending command\n");

	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
				IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}
	sys = ipa_ctx->ep[ep_idx].sys;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa_sps_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa_sps_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
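
/*
 * Example (hypothetical, for illustration only - the command payload "cmd"
 * and its allocation are assumptions): sending a single blocking immediate
 * command, following the pattern ipa2_tx_dp() uses for IPA_IP_PACKET_INIT:
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.opcode = IPA_IP_PACKET_INIT;
 *	desc.pyld = cmd;
 *	desc.len = sizeof(*cmd);
 *	desc.type = IPA_IMM_CMD_DESC;
 *	if (ipa_send_cmd(1, &desc))
 *		IPAERR("cmd failed\n");
 *	kfree(cmd);	// caller frees its resources after return
 */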

/**
 * ipa_sps_irq_tx_notify() - Callback function which will be called by
 * the SPS driver to start a Tx poll operation.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 */
static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
{
	struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
	int ret;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
		if (!atomic_read(&sys->curr_polling_state)) {
			ret = sps_get_config(sys->ep->ep_hdl,
					&sys->ep->connect);
			if (ret) {
				IPAERR("sps_get_config() failed %d\n", ret);
				break;
			}
			sys->ep->connect.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(sys->ep->ep_hdl,
					&sys->ep->connect);
			if (ret) {
				IPAERR("sps_set_config() failed %d\n", ret);
				break;
			}
			atomic_set(&sys->curr_polling_state, 1);
			queue_work(sys->wq, &sys->work);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

/**
 * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
 * the SPS driver after a Tx operation is complete.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 * This event will be later handled by ipa_wq_write_done.
 */
static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		tx_pkt = notify->data.transfer.user;
		if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

/**
 * ipa_poll_pkt() - Poll a packet descriptor from the SPS BAM
 *
 * Returns 0 on a successful poll, -EIO if no descriptor was available,
 * or the sps_get_iovec() error code on failure.
 */
static int ipa_poll_pkt(struct ipa_sys_context *sys,
		struct sps_iovec *iov)
{
	int ret;

	ret = sps_get_iovec(sys->ep->ep_hdl, iov);
	if (ret) {
		IPAERR("sps_get_iovec failed %d\n", ret);
		return ret;
	}

	if (iov->addr == 0)
		return -EIO;

	return 0;
}

/**
 * ipa_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, make it non DMAable
 * - Free the packet from the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
		bool in_poll_state)
{
	struct sps_iovec iov;
	int ret;
	int cnt = 0;

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
			!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = ipa_poll_pkt(sys, &iov);
		if (ret)
			break;

		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
			ipa_dma_memcpy_notify(sys, &iov);
		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
			ipa_wlan_wq_rx_common(sys, iov.size);
		else
			ipa_wq_rx_common(sys, iov.size);

		cnt++;
	}

	return cnt;
}

/**
 * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
{
	int ret;

	if (!sys->ep || !sys->ep->valid) {
		IPAERR("EP Not Valid, no need to cleanup.\n");
		return;
	}

	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_get_config() failed %d\n", ret);
		goto fail;
	}

	if (!atomic_read(&sys->curr_polling_state) &&
			((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
		IPADBG("already in intr mode\n");
		return;
	}

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}

	sys->event.options = SPS_O_EOT;
	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
	if (ret) {
		IPAERR("sps_register_event() failed %d\n", ret);
		goto fail;
	}
	sys->ep->connect.options =
		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_set_config() failed %d\n", ret);
		goto fail;
	}
	atomic_set(&sys->curr_polling_state, 0);
	if (!sys->ep->napi_enabled)
		ipa_handle_rx_core(sys, true, false);
	ipa_dec_release_wakelock(sys->ep->wakelock_client);
	return;

fail:
	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
			msecs_to_jiffies(1));
}


/**
 * ipa_sps_irq_control() - Function to enable or disable BAM IRQ.
 */
static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
{
	int ret;

	/*
	 * Do not change the sps config while in polling mode, as this
	 * indicates that the sps driver has already notified an EOT event
	 * and the sps config should not change until the ipa driver has
	 * processed the packet.
	 */
	if (atomic_read(&sys->curr_polling_state)) {
		IPADBG("in polling mode, do not change config\n");
		return;
	}

	if (enable) {
		ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
		if (ret) {
			IPAERR("sps_get_config() failed %d\n", ret);
			return;
		}
		sys->event.options = SPS_O_EOT;
		ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
		if (ret) {
			IPAERR("sps_register_event() failed %d\n", ret);
			return;
		}
		sys->ep->connect.options =
			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
		ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
		if (ret) {
			IPAERR("sps_set_config() failed %d\n", ret);
			return;
		}
	} else {
		ret = sps_get_config(sys->ep->ep_hdl,
				&sys->ep->connect);
		if (ret) {
			IPAERR("sps_get_config() failed %d\n", ret);
			return;
		}
		sys->ep->connect.options = SPS_O_AUTO_ENABLE |
			SPS_O_ACK_TRANSFERS | SPS_O_POLL;
		ret = sps_set_config(sys->ep->ep_hdl,
				&sys->ep->connect);
		if (ret) {
			IPAERR("sps_set_config() failed %d\n", ret);
			return;
		}
	}
}

void ipa_sps_irq_control_all(bool enable)
{
	struct ipa_ep_context *ep;
	int ipa_ep_idx, client_num;

	IPADBG("\n");

	for (client_num = IPA_CLIENT_CONS;
			client_num < IPA_CLIENT_MAX; client_num++) {
		if (!IPA_CLIENT_IS_APPS_CONS(client_num))
			continue;

		ipa_ep_idx = ipa_get_ep_mapping(client_num);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			continue;
		}
		ep = &ipa_ctx->ep[ipa_ep_idx];
		if (!ep->valid) {
			IPAERR("EP (%d) not allocated.\n", ipa_ep_idx);
			continue;
		}
		ipa_sps_irq_control(ep->sys, enable);
	}
}

/**
 * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
 * driver when a packet is received
 * @notify: SPS driver supplied notification information
 *
 * Called in an interrupt context, therefore the majority of the work is
 * deferred using a work queue.
 *
 * After receiving a packet, the driver goes to polling mode and keeps pulling
 * packets until the rx buffer is empty, then it goes back to interrupt mode.
 * This prevents the CPU from handling too many interrupts when the
 * throughput is high.
 */
static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
	struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
	int ret;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);

		if (atomic_read(&sys->curr_polling_state)) {
			sys->ep->eot_in_poll_err++;
			break;
		}

		ret = sps_get_config(sys->ep->ep_hdl,
				&sys->ep->connect);
		if (ret) {
			IPAERR("sps_get_config() failed %d\n", ret);
			break;
		}
		sys->ep->connect.options = SPS_O_AUTO_ENABLE |
			SPS_O_ACK_TRANSFERS | SPS_O_POLL;
		ret = sps_set_config(sys->ep->ep_hdl,
				&sys->ep->connect);
		if (ret) {
			IPAERR("sps_set_config() failed %d\n", ret);
			break;
		}
		ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
		atomic_set(&sys->curr_polling_state, 1);
		trace_intr_to_poll(sys->ep->client);
		queue_work(sys->wq, &sys->work);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}
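
/*
 * Summary of the interrupt/polling state machine implemented above and in
 * ipa_rx_switch_to_intr_mode() (descriptive comment added for clarity):
 * in interrupt mode the pipe is configured with SPS_O_EOT, so the BAM
 * raises an EOT interrupt per transfer. On the first EOT the driver
 * reconfigures the pipe to SPS_O_POLL, sets curr_polling_state and queues
 * the rx work. The rx work then drains descriptors with sps_get_iovec()
 * until the pipe stays empty for enough cycles, at which point it re-arms
 * SPS_O_EOT and clears curr_polling_state.
 */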

static void switch_to_intr_tx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
	ipa_handle_tx(sys);
}

/**
 * ipa_handle_rx() - handle packet reception. This function is executed in the
 * context of a work queue.
 * @sys: system pipe context
 *
 * ipa_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa_handle_rx(struct ipa_sys_context *sys)
{
	int inactive_cycles = 0;
	int cnt;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	do {
		cnt = ipa_handle_rx_core(sys, true, true);
		if (cnt == 0) {
			inactive_cycles++;
			trace_idle_sleep_enter(sys->ep->client);
			usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
					ipa_ctx->ipa_rx_max_timeout_usec);
			trace_idle_sleep_exit(sys->ep->client);
		} else {
			inactive_cycles = 0;
		}

		/* if pipe is out of buffers there is no point polling for
		 * completed descs; release the worker so delayed work can
		 * run in a timely manner
		 */
		if (sys->len == 0)
			break;

	} while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);

	trace_poll_to_intr(sys->ep->client);
	ipa_rx_switch_to_intr_mode(sys);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}

/**
 * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
 * function is executed in the softirq context
 *
 * If the remaining budget reaches zero, the driver switches back to
 * interrupt mode.
 *
 * Returns the number of polled packets; 0 on error
 */
int ipa2_rx_poll(u32 clnt_hdl, int weight)
{
	struct ipa_ep_context *ep;
	int ret;
	int cnt = 0;
	unsigned int delay = 1;
	struct sps_iovec iov;

	IPADBG("\n");
	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
			ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm 0x%x\n", clnt_hdl);
		return cnt;
	}

	ep = &ipa_ctx->ep[clnt_hdl];
	while (cnt < weight &&
			atomic_read(&ep->sys->curr_polling_state)) {

		ret = ipa_poll_pkt(ep->sys, &iov);
		if (ret)
			break;

		ipa_wq_rx_common(ep->sys, iov.size);
		cnt += 5;
	}

	if (cnt == 0) {
		ep->inactive_cycles++;
		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);

		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
			ep->switch_to_intr = true;
			delay = 0;
		}
		queue_delayed_work(ep->sys->wq,
			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
	} else
		ep->inactive_cycles = 0;

	return cnt;
}
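
/*
 * Example (hypothetical NAPI poll handler, for illustration only - the
 * handler name, client handle and budget handling are assumptions): a
 * NAPI-enabled client would typically call this from its napi->poll
 * callback:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int rcvd = ipa2_rx_poll(my_clnt_hdl, budget);
 *
 *		if (rcvd < budget)
 *			napi_complete(napi);
 *		return rcvd;
 *	}
 */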

static void switch_to_intr_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);

	if (sys->ep->napi_enabled) {
		if (sys->ep->switch_to_intr) {
			ipa_rx_switch_to_intr_mode(sys);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
			sys->ep->switch_to_intr = false;
			sys->ep->inactive_cycles = 0;
		} else
			sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
	} else
		ipa_handle_rx(sys);
}

/**
 * ipa_update_repl_threshold() - Update the repl_threshold for the client.
 *
 * Return value: None.
 */
void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
{
	int ep_idx;
	struct ipa_ep_context *ep;

	/* Check if ep is valid. */
	ep_idx = ipa2_get_ep_mapping(ipa_client);
	if (ep_idx == -1) {
		IPADBG("Invalid IPA client\n");
		return;
	}

	ep = &ipa_ctx->ep[ep_idx];
	if (!ep->valid) {
		IPADBG("EP not valid/Not applicable for client.\n");
		return;
	}
	/*
	 * Determine how many buffers/descriptors remaining will
	 * cause to drop below the yellow WM bar.
	 */
	if (ep->sys->rx_buff_sz)
		ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
						/ ep->sys->rx_buff_sz;
	else
		ep->rx_replenish_threshold = 0;
}
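
/*
 * For example (numbers are illustrative, not read from HW): if
 * ipa_get_sys_yellow_wm() reports a 16384-byte yellow watermark and
 * rx_buff_sz is 2048, the threshold becomes 16384 / 2048 = 8 buffers;
 * replenishment kicks in once fewer than 8 rx buffers remain queued.
 */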

/**
 * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
 * IPA EP configuration
 * @sys_in: [in] input needed to setup BAM pipe and configure EP
 * @clnt_hdl: [out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - call SPS APIs to create a system-to-bam connection with IPA.
 * - allocate descriptor FIFO
 * - register callback function(ipa_sps_irq_rx_notify or
 *   ipa_sps_irq_tx_notify - depends on client type) in case the driver is
 *   not configured to polling mode
 *
 * Returns: 0 on success, negative on failure
 */
int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	struct ipa_ep_context *ep;
	int ipa_ep_idx;
	int result = -EINVAL;
	dma_addr_t dma_addr;
	char buff[IPA_RESOURCE_NAME_MAX];
	struct iommu_domain *smmu_domain;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm client:%d fifo_sz:%d\n",
			sys_in->client, sys_in->desc_fifo_sz);
		goto fail_gen;
	}

	ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client.\n");
		goto fail_gen;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);

	if (ep->valid == 1) {
		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
			IPAERR("EP already allocated.\n");
			goto fail_and_disable_clocks;
		} else {
			if (ipa2_cfg_ep_hdr(ipa_ep_idx,
						&sys_in->ipa_ep_cfg.hdr)) {
				IPAERR("fail to configure hdr prop of EP.\n");
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			if (ipa2_cfg_ep_cfg(ipa_ep_idx,
						&sys_in->ipa_ep_cfg.cfg)) {
				IPAERR("fail to configure cfg prop of EP.\n");
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
					sys_in->client, ipa_ep_idx, ep->sys);
			ep->client_notify = sys_in->notify;
			ep->priv = sys_in->priv;
			*clnt_hdl = ipa_ep_idx;
			if (!ep->keep_ipa_awake)
				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

			return 0;
		}
	}

	memset(ep, 0, offsetof(struct ipa_ep_context, sys));

	if (!ep->sys) {
		ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL);
		if (!ep->sys) {
			IPAERR("failed to alloc sys ctx for client %d\n",
					sys_in->client);
			result = -ENOMEM;
			goto fail_and_disable_clocks;
		}

		ep->sys->ep = ep;
		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
				sys_in->client);
		ep->sys->wq = alloc_workqueue(buff,
				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
		if (!ep->sys->wq) {
			IPAERR("failed to create wq for client %d\n",
					sys_in->client);
			result = -EFAULT;
			goto fail_wq;
		}

		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
				sys_in->client);
		ep->sys->repl_wq = alloc_workqueue(buff,
				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
		if (!ep->sys->repl_wq) {
			IPAERR("failed to create rep wq for client %d\n",
					sys_in->client);
			result = -EFAULT;
			goto fail_wq2;
		}

		INIT_LIST_HEAD(&ep->sys->head_desc_list);
		INIT_LIST_HEAD(&ep->sys->rcycl_list);
		spin_lock_init(&ep->sys->spinlock);
	} else {
		memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
	}

	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to assign policy for client %d\n",
				sys_in->client);
		result = -ENOMEM;
		goto fail_gen2;
	}

	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->napi_enabled = sys_in->napi_enabled;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
	atomic_set(&ep->avail_fifo_desc,
		((sys_in->desc_fifo_sz / sizeof(struct sps_iovec)) - 1));

	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->sys->status_stat == NULL) {
		ep->sys->status_stat =
			kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL);
		if (!ep->sys->status_stat) {
			IPAERR("no memory\n");
			goto fail_gen2;
		}
	}

	result = ipa_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
				ipa_ep_idx);
		goto fail_gen2;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep configuration successful\n");
	} else {
		IPADBG("skipping ep configuration\n");
	}

	/* Default Config */
	ep->ep_hdl = sps_alloc_endpoint();
	if (ep->ep_hdl == NULL) {
		IPAERR("SPS EP allocation failed.\n");
		goto fail_gen2;
	}

	result = sps_get_config(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("fail to get config.\n");
		goto fail_sps_cfg;
	}

	/* Specific Config */
	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
		ep->connect.mode = SPS_MODE_SRC;
		ep->connect.destination = SPS_DEV_HANDLE_MEM;
		ep->connect.source = ipa_ctx->bam_handle;
		ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++;
		ep->connect.src_pipe_index = ipa_ep_idx;
		/*
		 * Determine how many buffers/descriptors remaining will
		 * cause to drop below the yellow WM bar.
		 */
		if (ep->sys->rx_buff_sz)
			ep->rx_replenish_threshold =
				ipa_get_sys_yellow_wm(ep->sys) /
				ep->sys->rx_buff_sz;
		else
			ep->rx_replenish_threshold = 0;
		/* Only when the WAN pipes are setup, actual threshold will
		 * be read from the register. So update LAN_CONS ep again with
		 * right value.
		 */
		if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS)
			ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS);
	} else {
		ep->connect.mode = SPS_MODE_DEST;
		ep->connect.source = SPS_DEV_HANDLE_MEM;
		ep->connect.destination = ipa_ctx->bam_handle;
		ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++;
		ep->connect.dest_pipe_index = ipa_ep_idx;
	}

	IPADBG("client:%d ep:%d",
			sys_in->client, ipa_ep_idx);

	IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
			ep->connect.dest_pipe_index,
			ep->connect.src_pipe_index);

	ep->connect.options = ep->sys->sps_option;
	ep->connect.desc.size = sys_in->desc_fifo_sz;
	ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
			ep->connect.desc.size, &dma_addr, GFP_KERNEL);
	if (ipa_ctx->smmu_s1_bypass) {
		ep->connect.desc.phys_base = dma_addr;
	} else {
		ep->connect.desc.iova = dma_addr;
		smmu_domain = ipa2_get_smmu_domain();
		if (smmu_domain != NULL) {
			ep->connect.desc.phys_base =
				iommu_iova_to_phys(smmu_domain, dma_addr);
		}
	}
	if (ep->connect.desc.base == NULL) {
		IPAERR("fail to get DMA desc memory.\n");
		goto fail_sps_cfg;
	}

	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;

	result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto fail_sps_connect;
	}

	ep->sys->event.options = SPS_O_EOT;
	ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
	ep->sys->event.xfer_done = NULL;
	ep->sys->event.user = ep->sys;
	ep->sys->event.callback = ep->sys->sps_callback;
	result = sps_register_event(ep->ep_hdl, &ep->sys->event);
	if (result < 0) {
		IPAERR("register event error %d\n", result);
		goto fail_register_event;
	}

	*clnt_hdl = ipa_ep_idx;

	if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
		ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
				sizeof(void *), GFP_KERNEL);
		if (!ep->sys->repl.cache) {
			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
			ep->sys->repl_hdlr = ipa_replenish_rx_cache;
			ep->sys->repl.capacity = 0;
		} else {
			atomic_set(&ep->sys->repl.head_idx, 0);
			atomic_set(&ep->sys->repl.tail_idx, 0);
			ipa_wq_repl_rx(&ep->sys->repl_work);
		}
	}

	if (IPA_CLIENT_IS_CONS(sys_in->client))
		ipa_replenish_rx_cache(ep->sys);

	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
		ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
		atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
	}

	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
		if (ipa_ctx->modem_cfg_emb_pipe_flt &&
			sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa_install_dflt_flt_rules(ipa_ep_idx);
	}

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
			ipa_ep_idx, ep->sys);

	return 0;

fail_register_event:
	sps_disconnect(ep->ep_hdl);
fail_sps_connect:
	dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
			ep->connect.desc.base,
			ep->connect.desc.phys_base);
fail_sps_cfg:
	sps_free_endpoint(ep->ep_hdl);
fail_gen2:
	destroy_workqueue(ep->sys->repl_wq);
fail_wq2:
	destroy_workqueue(ep->sys->wq);
fail_wq:
	kfree(ep->sys);
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail_and_disable_clocks:
	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
	return result;
}
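
/*
 * Example (hypothetical client, for illustration only - the client type,
 * FIFO size and notify callback are assumptions): setting up and later
 * tearing down an APPS consumer pipe:
 *
 *	struct ipa_sys_connect_params sys_in;
 *	u32 hdl;
 *
 *	memset(&sys_in, 0, sizeof(sys_in));
 *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
 *	sys_in.desc_fifo_sz = 0x800;
 *	sys_in.notify = my_rx_notify_cb;
 *	sys_in.priv = my_priv;
 *	if (ipa2_setup_sys_pipe(&sys_in, &hdl))
 *		return -EFAULT;
 *	...
 *	ipa2_teardown_sys_pipe(hdl);
 */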

/**
 * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
 * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe
 *
 * Returns: 0 on success, negative on failure
 */
int ipa2_teardown_sys_pipe(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;
	int empty;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
			ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));

	ipa_disable_data_path(clnt_hdl);
	if (ep->napi_enabled) {
		ep->switch_to_intr = true;
		do {
			usleep_range(95, 105);
		} while (atomic_read(&ep->sys->curr_polling_state));
	}

	if (IPA_CLIENT_IS_PROD(ep->client)) {
		do {
			spin_lock_bh(&ep->sys->spinlock);
			empty = list_empty(&ep->sys->head_desc_list);
			spin_unlock_bh(&ep->sys->spinlock);
			if (!empty)
				usleep_range(95, 105);
			else
				break;
		} while (1);
	}

	if (IPA_CLIENT_IS_CONS(ep->client)) {
		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
		cancel_delayed_work_sync(&ep->sys->switch_to_intr_work);
	}

	flush_workqueue(ep->sys->wq);
	sps_disconnect(ep->ep_hdl);
	dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
			ep->connect.desc.base,
			ep->connect.desc.phys_base);
	sps_free_endpoint(ep->ep_hdl);
	if (ep->sys->repl_wq)
		flush_workqueue(ep->sys->repl_wq);
	if (IPA_CLIENT_IS_CONS(ep->client))
		ipa_cleanup_rx(ep->sys);

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
		if (ipa_ctx->modem_cfg_emb_pipe_flt &&
			ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa_delete_dflt_flt_rules(clnt_hdl);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
		atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt);

	memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats));

	if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
		ipa_cleanup_wlan_rx_common_cache();

	ep->valid = 0;
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}

/**
 * ipa_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: pointer to the skb that was sent
 * @user2: endpoint index
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa_connect.
 */
static void ipa_tx_comp_usr_notify_release(void *user1, int user2)
{
	struct sk_buff *skb = (struct sk_buff *)user1;
	int ep_idx = user2;

	IPADBG("skb=%p ep=%d\n", skb, ep_idx);

	IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl);

	if (ipa_ctx->ep[ep_idx].client_notify)
		ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
				IPA_WRITE_DONE, (unsigned long)skb);
	else
		dev_kfree_skb_any(skb);
}

static void ipa_tx_cmd_comp(void *user1, int user2)
{
	kfree(user1);
}

/**
 * ipa2_tx_dp() - Data-path tx handler
 * @dst: [in] which IPA destination to route tx packets to
 * @skb: [in] the packet to send
 * @meta: [in] TX packet meta-data
 *
 * Data-path tx handler, this is used for both SW data-path which by-passes most
 * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
 * dst is a "valid" CONS type, then SW data-path is used. If dst is the
 * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
 * is an error. For errors, client needs to free the skb as needed. For success,
 * IPA driver will later invoke client callback if one was supplied. That
 * callback should free the skb. If no callback supplied, IPA driver will free
 * the skb internally
 *
 * The function will use two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
 * the first descriptor will be used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once this send was done from SPS point-of-view the IPA driver will
 * get notified by the supplied callback - ipa_sps_irq_tx_comp()
 *
 * ipa_sps_irq_tx_comp will call to the user supplied
 * callback (from ipa_connect)
 *
 * Returns: 0 on success, negative on failure
 */
int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		struct ipa_tx_meta *meta)
{
	struct ipa_desc *desc;
	struct ipa_desc _desc[2];
	int dst_ep_idx;
	struct ipa_ip_packet_init *cmd;
	struct ipa_sys_context *sys;
	int src_ep_idx;
	int num_frags, f;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (skb->len == 0) {
		IPAERR("packet size is 0\n");
		return -EINVAL;
	}

	num_frags = skb_shinfo(skb)->nr_frags;
	if (num_frags) {
		/* 1 desc is needed for the linear portion of skb;
		 * 1 desc may be needed for the PACKET_INIT;
		 * 1 desc for each frag
		 */
		desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC);
		if (!desc) {
			IPAERR("failed to alloc desc array\n");
			goto fail_mem;
		}
	} else {
		memset(_desc, 0, 2 * sizeof(struct ipa_desc));
		desc = &_desc[0];
	}

	/*
	 * USB_CONS: PKT_INIT ep_idx = dst pipe
	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
	 *
	 * LAN TX: all PKT_INIT
	 * WAN TX: PKT_INIT (cmd) + HW (data)
	 *
	 */
	if (IPA_CLIENT_IS_CONS(dst)) {
		src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n",
				IPA_CLIENT_APPS_LAN_WAN_PROD);
			goto fail_gen;
		}
		dst_ep_idx = ipa2_get_ep_mapping(dst);
	} else {
		src_ep_idx = ipa2_get_ep_mapping(dst);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n", dst);
			goto fail_gen;
		}
		if (meta && meta->pkt_init_dst_ep_valid)
			dst_ep_idx = meta->pkt_init_dst_ep;
		else
			dst_ep_idx = -1;
	}

	sys = ipa_ctx->ep[src_ep_idx].sys;

	if (!sys->ep->valid) {
		IPAERR("pipe not valid\n");
		goto fail_gen;
	}

	if (dst_ep_idx != -1) {
		/* SW data path */
		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
		if (!cmd) {
			IPAERR("failed to alloc immediate command object\n");
			goto fail_gen;
		}

		cmd->destination_pipe_index = dst_ep_idx;
		desc[0].opcode = IPA_IP_PACKET_INIT;
		desc[0].pyld = cmd;
		desc[0].len = sizeof(struct ipa_ip_packet_init);
		desc[0].type = IPA_IMM_CMD_DESC;
		desc[0].callback = ipa_tx_cmd_comp;
		desc[0].user1 = cmd;
		desc[1].pyld = skb->data;
		desc[1].len = skb_headlen(skb);
		desc[1].type = IPA_DATA_DESC_SKB;
		desc[1].callback = ipa_tx_comp_usr_notify_release;
		desc[1].user1 = skb;
		desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid &&
				meta->pkt_init_dst_ep_remote) ?
				src_ep_idx :
				dst_ep_idx;
		if (meta && meta->dma_address_valid) {
			desc[1].dma_address_valid = true;
			desc[1].dma_address = meta->dma_address;
		}

		for (f = 0; f < num_frags; f++) {
			desc[2 + f].frag = &skb_shinfo(skb)->frags[f];
			desc[2 + f].type = IPA_DATA_DESC_SKB_PAGED;
			desc[2 + f].len = skb_frag_size(desc[2 + f].frag);
		}

		/* don't free skb till frag mappings are released */
		if (num_frags) {
			desc[2 + f - 1].callback = desc[1].callback;
			desc[2 + f - 1].user1 = desc[1].user1;
			desc[2 + f - 1].user2 = desc[1].user2;
			desc[1].callback = NULL;
		}

		if (ipa_send(sys, num_frags + 2, desc, true)) {
			IPAERR("fail to send skb %p num_frags %u SWP\n",
					skb, num_frags);
			goto fail_send;
		}
		IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
	} else {
		/* HW data path */
		desc[0].pyld = skb->data;
		desc[0].len = skb_headlen(skb);
		desc[0].type = IPA_DATA_DESC_SKB;
		desc[0].callback = ipa_tx_comp_usr_notify_release;
		desc[0].user1 = skb;
		desc[0].user2 = src_ep_idx;

		if (meta && meta->dma_address_valid) {
			desc[0].dma_address_valid = true;
			desc[0].dma_address = meta->dma_address;
		}

		if (num_frags == 0) {
			if (ipa_send_one(sys, desc, true)) {
				IPAERR("fail to send skb %p HWP\n", skb);
				goto fail_gen;
			}
		} else {
			for (f = 0; f < num_frags; f++) {
				desc[1 + f].frag = &skb_shinfo(skb)->frags[f];
				desc[1 + f].type = IPA_DATA_DESC_SKB_PAGED;
				desc[1 + f].len =
					skb_frag_size(desc[1 + f].frag);
			}

			/* don't free skb till frag mappings are released */
			desc[1 + f - 1].callback = desc[0].callback;
			desc[1 + f - 1].user1 = desc[0].user1;
			desc[1 + f - 1].user2 = desc[0].user2;
			desc[0].callback = NULL;

			if (ipa_send(sys, num_frags + 1, desc, true)) {
				IPAERR("fail to send skb %p num_frags %u HWP\n",
						skb, num_frags);
				goto fail_gen;
			}
		}

		IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
	}

	if (num_frags) {
		kfree(desc);
		IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
	}

	return 0;

fail_send:
	kfree(cmd);
fail_gen:
	if (num_frags)
		kfree(desc);
fail_mem:
	return -EFAULT;
}
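
/*
 * Example (hypothetical netdev transmit path, for illustration only - the
 * destination client choice is an assumption): a LAN client would queue
 * an skb on the SW data path like this:
 *
 *	if (ipa2_tx_dp(IPA_CLIENT_APPS_LAN_CONS, skb, NULL)) {
 *		dev_kfree_skb_any(skb);	// caller frees the skb on error
 *		return NETDEV_TX_OK;
 *	}
 *	// on success the skb is freed by the completion callback
 */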

static void ipa_wq_handle_rx(struct work_struct *work)
{
	struct ipa_sys_context *sys;

	sys = container_of(work, struct ipa_sys_context, work);

	if (sys->ep->napi_enabled) {
		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
		sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
	} else
		ipa_handle_rx(sys);
}
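
/*
 * The fast-replenish path below keeps a single-producer/single-consumer
 * ring of pre-allocated rx buffers (descriptive comment added for
 * clarity): ipa_wq_repl_rx() is the producer, advancing repl.tail_idx
 * after each slot is filled; the consumer (presumably
 * ipa_fast_replenish_rx_cache(), whose body is outside this section)
 * advances repl.head_idx. The ring is full when
 * (tail + 1) % capacity == head and empty when head == tail, which is
 * why it is sized rx_pool_sz + 1 in ipa2_setup_sys_pipe().
 */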

static void ipa_wq_repl_rx(struct work_struct *work)
{
	struct ipa_sys_context *sys;
	void *ptr;
	struct ipa_rx_pkt_wrapper *rx_pkt;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u32 next;
	u32 curr;

	sys = container_of(work, struct ipa_sys_context, repl_work);
	curr = atomic_read(&sys->repl.tail_idx);

begin:
	while (1) {
		next = (curr + 1) % sys->repl.capacity;
		if (next == atomic_read(&sys->repl.head_idx))
			goto fail_kmem_cache_alloc;

		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
				flag);
		if (!rx_pkt) {
			pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
					__func__, sys);
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);
		INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL) {
			pr_err_ratelimited("%s fail alloc skb sys=%p\n",
					__func__, sys);
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
				sys->rx_buff_sz,
				DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
				rx_pkt->data.dma_addr == ~0) {
			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
					__func__, (void *)rx_pkt->data.dma_addr,
					ptr, sys);
			goto fail_dma_mapping;
		}

		sys->repl.cache[curr] = rx_pkt;
		curr = next;
		/* ensure write is done before setting tail index */
		mb();
		atomic_set(&sys->repl.tail_idx, next);
	}

	return;

fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (atomic_read(&sys->repl.tail_idx) ==
			atomic_read(&sys->repl.head_idx)) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
			IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty);
		else
			WARN_ON(1);
		pr_err_ratelimited("%s sys=%p repl ring empty\n",
				__func__, sys);
		goto begin;
	}
}

static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys)
{
	struct ipa_rx_pkt_wrapper *rx_pkt = NULL;
	struct ipa_rx_pkt_wrapper *tmp;
	int ret;
	u32 rx_len_cached = 0;

	IPADBG("\n");

	spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
	rx_len_cached = sys->len;

	if (rx_len_cached < sys->rx_pool_sz) {
		list_for_each_entry_safe(rx_pkt, tmp,
			&ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
			list_del(&rx_pkt->link);

			if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0)
				ipa_ctx->wc_memb.wlan_comm_free_cnt--;

			INIT_LIST_HEAD(&rx_pkt->link);
			rx_pkt->len = 0;
			rx_pkt->sys = sys;

			ret = sps_transfer_one(sys->ep->ep_hdl,
				rx_pkt->data.dma_addr,
				IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);

			if (ret) {
				IPAERR("sps_transfer_one failed %d\n", ret);
				goto fail_sps_transfer;
			}

			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
			rx_len_cached = ++sys->len;

			if (rx_len_cached >= sys->rx_pool_sz) {
				spin_unlock_bh(
					&ipa_ctx->wc_memb.wlan_spinlock);
				return;
			}
		}
	}
	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);

	if (rx_len_cached < sys->rx_pool_sz &&
			ipa_ctx->wc_memb.wlan_comm_total_cnt <
			IPA_WLAN_COMM_RX_POOL_HIGH) {
		ipa_replenish_rx_cache(sys);
		ipa_ctx->wc_memb.wlan_comm_total_cnt +=
			(sys->rx_pool_sz - rx_len_cached);
	}

	return;

fail_sps_transfer:
	list_del(&rx_pkt->link);
	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
}
1967
1968static void ipa_cleanup_wlan_rx_common_cache(void)
1969{
1970 struct ipa_rx_pkt_wrapper *rx_pkt;
1971 struct ipa_rx_pkt_wrapper *tmp;
1972
1973	spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1974
1975	list_for_each_entry_safe(rx_pkt, tmp,
1976 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1977 list_del(&rx_pkt->link);
1978 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
1979			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
1980 dev_kfree_skb_any(rx_pkt->data.skb);
1981 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1982 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1983 ipa_ctx->wc_memb.wlan_comm_total_cnt--;
1984 }
1985 ipa_ctx->wc_memb.total_tx_pkts_freed = 0;
1986
1987 if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0)
1988 IPAERR("wlan comm buff free cnt: %d\n",
1989 ipa_ctx->wc_memb.wlan_comm_free_cnt);
1990
1991 if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0)
1992 IPAERR("wlan comm buff total cnt: %d\n",
1993 ipa_ctx->wc_memb.wlan_comm_total_cnt);
1994
1995	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1996
1997}
1998
1999static void ipa_alloc_wlan_rx_common_cache(u32 size)
2000{
2001 void *ptr;
2002 struct ipa_rx_pkt_wrapper *rx_pkt;
2003 int rx_len_cached = 0;
2004 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2005 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2006
2007 rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt;
2008 while (rx_len_cached < size) {
2009 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2010 flag);
2011 if (!rx_pkt) {
2012 IPAERR("failed to alloc rx wrapper\n");
2013 goto fail_kmem_cache_alloc;
2014 }
2015
2016 INIT_LIST_HEAD(&rx_pkt->link);
2017 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2018
2019 rx_pkt->data.skb =
2020 ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
2021 flag);
2022 if (rx_pkt->data.skb == NULL) {
2023 IPAERR("failed to alloc skb\n");
2024 goto fail_skb_alloc;
2025 }
2026 ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
2027 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2028 IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
2029 if (rx_pkt->data.dma_addr == 0 ||
2030 rx_pkt->data.dma_addr == ~0) {
2031 IPAERR("dma_map_single failure %p for %p\n",
2032 (void *)rx_pkt->data.dma_addr, ptr);
2033 goto fail_dma_mapping;
2034 }
2035
2036 list_add_tail(&rx_pkt->link,
2037 &ipa_ctx->wc_memb.wlan_comm_desc_list);
2038 rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
2039
2040 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
2041
2042 }
2043
2044 return;
2045
2046fail_dma_mapping:
2047 dev_kfree_skb_any(rx_pkt->data.skb);
2048fail_skb_alloc:
2049 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2050fail_kmem_cache_alloc:
2051 return;
2052}
2053
2054
2055/**
2056 * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
2057 *
2058 * The function allocates buffers from the rx_pkt_wrapper_cache until
2059 * there are sys->rx_pool_sz buffers queued on the pipe:
2060 * - Allocate a buffer wrapper from the cache
2061 * - Initialize the packet's link
2062 * - Initialize the packet's work struct
2063 * - Allocate the packet's socket buffer (skb)
2064 * - Fill the packet's skb with data
2065 * - Make the packet DMAable
2066 * - Add the packet to the system pipe linked list
2067 * - Initiate an SPS transfer so that the SPS driver will use this packet later.
2068 */
2069static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
2070{
2071 void *ptr;
2072 struct ipa_rx_pkt_wrapper *rx_pkt;
2073 int ret;
2074 int rx_len_cached = 0;
2075 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2076 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2077
2078 rx_len_cached = sys->len;
2079
2080 while (rx_len_cached < sys->rx_pool_sz) {
2081 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2082 flag);
2083 if (!rx_pkt) {
2084 IPAERR("failed to alloc rx wrapper\n");
2085 goto fail_kmem_cache_alloc;
2086 }
2087
2088 INIT_LIST_HEAD(&rx_pkt->link);
2089 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2090 rx_pkt->sys = sys;
2091
2092 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
2093 if (rx_pkt->data.skb == NULL) {
2094 IPAERR("failed to alloc skb\n");
2095 goto fail_skb_alloc;
2096 }
2097 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2098 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2099 sys->rx_buff_sz,
2100 DMA_FROM_DEVICE);
2101 if (rx_pkt->data.dma_addr == 0 ||
2102 rx_pkt->data.dma_addr == ~0) {
2103 IPAERR("dma_map_single failure %p for %p\n",
2104 (void *)rx_pkt->data.dma_addr, ptr);
2105 goto fail_dma_mapping;
2106 }
2107
2108 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2109 rx_len_cached = ++sys->len;
2110
2111 ret = sps_transfer_one(sys->ep->ep_hdl,
2112 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2113
2114 if (ret) {
2115 IPAERR("sps_transfer_one failed %d\n", ret);
2116 goto fail_sps_transfer;
2117 }
2118 }
2119
2120 return;
2121
2122fail_sps_transfer:
2123 list_del(&rx_pkt->link);
2124 rx_len_cached = --sys->len;
2125 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2126 sys->rx_buff_sz, DMA_FROM_DEVICE);
2127fail_dma_mapping:
2128 sys->free_skb(rx_pkt->data.skb);
2129fail_skb_alloc:
2130 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2131fail_kmem_cache_alloc:
2132 if (rx_len_cached == 0)
2133 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2134 msecs_to_jiffies(1));
2135}
2136
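/*
 * Variant of the replenish path that reuses skbs parked on
 * sys->rcycl_list by ipa2_recycle_wan_skb() instead of allocating new
 * ones; selected for the WAN consumer when skb recycling is enabled
 * (the WAN handler asserts that this implies GRO aggregation).
 */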
2137static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
2138{
2139 void *ptr;
2140 struct ipa_rx_pkt_wrapper *rx_pkt;
2141 int ret;
2142 int rx_len_cached = 0;
2143
2144 rx_len_cached = sys->len;
2145
2146 while (rx_len_cached < sys->rx_pool_sz) {
2147 spin_lock_bh(&sys->spinlock);
2148 if (list_empty(&sys->rcycl_list))
2149 goto fail_kmem_cache_alloc;
2150
2151 rx_pkt = list_first_entry(&sys->rcycl_list,
2152 struct ipa_rx_pkt_wrapper, link);
2153 list_del(&rx_pkt->link);
2154 spin_unlock_bh(&sys->spinlock);
2155 INIT_LIST_HEAD(&rx_pkt->link);
2156 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2157 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
2158 ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
2159 if (rx_pkt->data.dma_addr == 0 ||
2160 rx_pkt->data.dma_addr == ~0)
2161 goto fail_dma_mapping;
2162
2163 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2164 rx_len_cached = ++sys->len;
2165
2166 ret = sps_transfer_one(sys->ep->ep_hdl,
2167 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2168
2169 if (ret) {
2170 IPAERR("sps_transfer_one failed %d\n", ret);
2171 goto fail_sps_transfer;
2172 }
2173 }
2174
2175 return;
2176fail_sps_transfer:
2177 rx_len_cached = --sys->len;
2178 list_del(&rx_pkt->link);
2179 INIT_LIST_HEAD(&rx_pkt->link);
2180 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2181 sys->rx_buff_sz, DMA_FROM_DEVICE);
2182fail_dma_mapping:
2183	spin_lock_bh(&sys->spinlock);
2184	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
2185	/* fall through: fail_kmem_cache_alloc releases the lock once */
2186fail_kmem_cache_alloc:
2187	spin_unlock_bh(&sys->spinlock);
2189 if (rx_len_cached == 0)
2190 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2191 msecs_to_jiffies(1));
2192}
2193
2194static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
2195{
2196 struct ipa_rx_pkt_wrapper *rx_pkt;
2197 int ret;
2198 int rx_len_cached = 0;
2199 u32 curr;
2200
2201 rx_len_cached = sys->len;
2202 curr = atomic_read(&sys->repl.head_idx);
2203
2204 while (rx_len_cached < sys->rx_pool_sz) {
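		/* head == tail: the repl worker has not published buffers */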
2205 if (curr == atomic_read(&sys->repl.tail_idx)) {
2206 queue_work(sys->repl_wq, &sys->repl_work);
2207 break;
2208 }
2209
2210 rx_pkt = sys->repl.cache[curr];
2211 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2212
2213 ret = sps_transfer_one(sys->ep->ep_hdl,
2214 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2215
2216 if (ret) {
2217 IPAERR("sps_transfer_one failed %d\n", ret);
2218 list_del(&rx_pkt->link);
2219 break;
2220 }
2221 rx_len_cached = ++sys->len;
2222 sys->repl_trig_cnt++;
2223 curr = (curr + 1) % sys->repl.capacity;
2224 /* ensure write is done before setting head index */
2225 mb();
2226 atomic_set(&sys->repl.head_idx, curr);
2227 }
2228
2229 if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0)
2230 queue_work(sys->repl_wq, &sys->repl_work);
2231
2232 if (rx_len_cached <= sys->ep->rx_replenish_threshold) {
2233 if (rx_len_cached == 0) {
2234 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
2235 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty);
2236 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
2237 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty);
2238 else
2239 WARN_ON(1);
2240 }
2241 sys->repl_trig_cnt = 0;
2242 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2243 msecs_to_jiffies(1));
2244 }
2245}
2246
2247static void replenish_rx_work_func(struct work_struct *work)
2248{
2249 struct delayed_work *dwork;
2250 struct ipa_sys_context *sys;
2251
2252 dwork = container_of(work, struct delayed_work, work);
2253 sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
2254 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2255 sys->repl_hdlr(sys);
2256 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2257}
2258
2259/**
2260 * ipa_cleanup_rx() - release RX queue resources
2261 *
2262 */
2263static void ipa_cleanup_rx(struct ipa_sys_context *sys)
2264{
2265 struct ipa_rx_pkt_wrapper *rx_pkt;
2266 struct ipa_rx_pkt_wrapper *r;
2267 u32 head;
2268 u32 tail;
2269
2270 list_for_each_entry_safe(rx_pkt, r,
2271 &sys->head_desc_list, link) {
2272 list_del(&rx_pkt->link);
2273 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2274 sys->rx_buff_sz, DMA_FROM_DEVICE);
2275 sys->free_skb(rx_pkt->data.skb);
2276 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2277 }
2278
2279 list_for_each_entry_safe(rx_pkt, r,
2280 &sys->rcycl_list, link) {
2281 list_del(&rx_pkt->link);
2282 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2283 sys->rx_buff_sz, DMA_FROM_DEVICE);
2284 sys->free_skb(rx_pkt->data.skb);
2285 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2286 }
2287
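	/* drain any buffers still parked in the replenish ring */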
2288 if (sys->repl.cache) {
2289 head = atomic_read(&sys->repl.head_idx);
2290 tail = atomic_read(&sys->repl.tail_idx);
2291 while (head != tail) {
2292 rx_pkt = sys->repl.cache[head];
2293 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2294 sys->rx_buff_sz, DMA_FROM_DEVICE);
2295 sys->free_skb(rx_pkt->data.skb);
2296 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2297 head = (head + 1) % sys->repl.capacity;
2298 }
2299 kfree(sys->repl.cache);
2300 }
2301}
2302
2303static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
2304{
2305 struct sk_buff *skb2 = NULL;
2306
2307 skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
2308 if (likely(skb2)) {
2309 /* Set the data pointer */
2310 skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
2311 memcpy(skb2->data, skb->data, len);
2312 skb2->len = len;
2313 skb_set_tail_pointer(skb2, len);
2314 }
2315
2316 return skb2;
2317}
2318
2319static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
2320 struct ipa_sys_context *sys)
2321{
2322 int rc = 0;
2323 struct ipa_hw_pkt_status *status;
2324 struct sk_buff *skb2;
2325 int pad_len_byte;
2326 int len;
2327 unsigned char *buf;
2328 int src_pipe;
2329 unsigned int used = *(unsigned int *)skb->cb;
2330 unsigned int used_align = ALIGN(used, 32);
2331 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2332	u32 skb2_len;
2333
2334 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2335
2336 if (skb->len == 0) {
2337 IPAERR("ZLT\n");
2338 sys->free_skb(skb);
2339 return rc;
2340 }
2341
2342 if (sys->len_partial) {
2343 IPADBG("len_partial %d\n", sys->len_partial);
2344 buf = skb_push(skb, sys->len_partial);
2345 memcpy(buf, sys->prev_skb->data, sys->len_partial);
2346 sys->len_partial = 0;
2347 sys->free_skb(sys->prev_skb);
2348 sys->prev_skb = NULL;
2349 goto begin;
2350 }
2351
2352 /* this pipe has TX comp (status only) + mux-ed LAN RX data
2353 * (status+data)
2354 */
2355 if (sys->len_rem) {
2356 IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
2357 sys->len_pad);
2358 if (sys->len_rem <= skb->len) {
2359 if (sys->prev_skb) {
2360 skb2 = skb_copy_expand(sys->prev_skb, 0,
2361 sys->len_rem, GFP_KERNEL);
2362 if (likely(skb2)) {
2363 memcpy(skb_put(skb2, sys->len_rem),
2364 skb->data, sys->len_rem);
2365 skb_trim(skb2,
2366 skb2->len - sys->len_pad);
2367 skb2->truesize = skb2->len +
2368 sizeof(struct sk_buff);
2369 if (sys->drop_packet)
2370 dev_kfree_skb_any(skb2);
2371 else
2372 sys->ep->client_notify(
2373 sys->ep->priv,
2374 IPA_RECEIVE,
2375 (unsigned long)(skb2));
2376 } else {
2377 IPAERR("copy expand failed\n");
2378 }
2379 dev_kfree_skb_any(sys->prev_skb);
2380 }
2381 skb_pull(skb, sys->len_rem);
2382 sys->prev_skb = NULL;
2383 sys->len_rem = 0;
2384 sys->len_pad = 0;
2385 } else {
2386 if (sys->prev_skb) {
2387 skb2 = skb_copy_expand(sys->prev_skb, 0,
2388 skb->len, GFP_KERNEL);
2389 if (likely(skb2)) {
2390 memcpy(skb_put(skb2, skb->len),
2391 skb->data, skb->len);
2392 } else {
2393 IPAERR("copy expand failed\n");
2394 }
2395 dev_kfree_skb_any(sys->prev_skb);
2396 sys->prev_skb = skb2;
2397 }
2398 sys->len_rem -= skb->len;
2399 sys->free_skb(skb);
2400 return rc;
2401 }
2402 }
2403
2404begin:
2405 while (skb->len) {
2406 sys->drop_packet = false;
2407 IPADBG("LEN_REM %d\n", skb->len);
2408
2409 if (skb->len < IPA_PKT_STATUS_SIZE) {
2410 WARN_ON(sys->prev_skb != NULL);
2411 IPADBG("status straddles buffer\n");
2412			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
2413			sys->len_partial = skb->len;
2414 return rc;
2415 }
2416
2417 status = (struct ipa_hw_pkt_status *)skb->data;
2418 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2419 status->status_opcode, status->endp_src_idx,
2420 status->endp_dest_idx, status->pkt_len);
2421 if (sys->status_stat) {
2422 sys->status_stat->status[sys->status_stat->curr] =
2423 *status;
2424 sys->status_stat->curr++;
2425 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2426 sys->status_stat->curr = 0;
2427 }
2428
2429 if (status->status_opcode !=
2430 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2431 status->status_opcode !=
2432 IPA_HW_STATUS_OPCODE_PACKET &&
2433 status->status_opcode !=
2434 IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET &&
2435 status->status_opcode !=
2436 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2437 IPAERR("unsupported opcode(%d)\n",
2438 status->status_opcode);
2439 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2440 continue;
2441 }
2442 IPA_STATS_EXCP_CNT(status->exception,
2443 ipa_ctx->stats.rx_excp_pkts);
2444 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2445 status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
2446 IPAERR("status fields invalid\n");
2447 IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
2448 status->status_opcode, status->endp_src_idx,
2449 status->endp_dest_idx, status->pkt_len);
2450 WARN_ON(1);
2451 BUG();
2452 }
2453 if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
2454 struct ipa_tag_completion *comp;
2455
2456 IPADBG("TAG packet arrived\n");
2457 if (status->tag_f_2 == IPA_COOKIE) {
2458 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2459 if (skb->len < sizeof(comp)) {
2460 IPAERR("TAG arrived without packet\n");
2461 return rc;
2462 }
2463 memcpy(&comp, skb->data, sizeof(comp));
2464 skb_pull(skb, sizeof(comp) +
2465 IPA_SIZE_DL_CSUM_META_TRAILER);
2466 complete(&comp->comp);
2467 if (atomic_dec_return(&comp->cnt) == 0)
2468 kfree(comp);
2469 continue;
2470 } else {
2471 IPADBG("ignoring TAG with wrong cookie\n");
2472 }
2473 }
2474 if (status->pkt_len == 0) {
2475 IPADBG("Skip aggr close status\n");
2476 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2477 IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close);
2478 IPA_STATS_DEC_CNT(
2479 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2480 continue;
2481 }
2482 if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
2483 /* RX data */
2484 src_pipe = status->endp_src_idx;
2485
2486 /*
2487 * A packet which is received back to the AP after
2488 * there was no route match.
2489 */
2490 if (!status->exception && !status->route_match)
2491 sys->drop_packet = true;
2492
2493 if (skb->len == IPA_PKT_STATUS_SIZE &&
2494 !status->exception) {
2495 WARN_ON(sys->prev_skb != NULL);
2496 IPADBG("Ins header in next buffer\n");
2497				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
2498				sys->len_partial = skb->len;
2499 return rc;
2500 }
2501
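			/* round the payload up to a 4-byte boundary;
			 * e.g. pkt_len 61 -> pad_len_byte 3
			 */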
2502 pad_len_byte = ((status->pkt_len + 3) & ~3) -
2503 status->pkt_len;
2504
2505 len = status->pkt_len + pad_len_byte +
2506 IPA_SIZE_DL_CSUM_META_TRAILER;
2507 IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
2508 status->pkt_len, len);
2509
2510 if (status->exception ==
2511 IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
2512 IPADBG("Dropping packet on DeAggr Exception\n");
2513 sys->drop_packet = true;
2514 }
2515
2516			skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE;
2517			skb2_len = min(skb2_len, skb->len);
2518			skb2 = ipa_skb_copy_for_client(skb, skb2_len);
2519			if (likely(skb2)) {
2520 if (skb->len < len + IPA_PKT_STATUS_SIZE) {
2521 IPADBG("SPL skb len %d len %d\n",
2522 skb->len, len);
2523 sys->prev_skb = skb2;
2524 sys->len_rem = len - skb->len +
2525 IPA_PKT_STATUS_SIZE;
2526 sys->len_pad = pad_len_byte;
2527 skb_pull(skb, skb->len);
2528 } else {
2529 skb_trim(skb2, status->pkt_len +
2530 IPA_PKT_STATUS_SIZE);
2531 IPADBG("rx avail for %d\n",
2532 status->endp_dest_idx);
2533 if (sys->drop_packet) {
2534 dev_kfree_skb_any(skb2);
2535 } else if (status->pkt_len >
2536 IPA_GENERIC_AGGR_BYTE_LIMIT *
2537 1024) {
2538 IPAERR("packet size invalid\n");
2539 IPAERR("STATUS opcode=%d\n",
2540 status->status_opcode);
2541 IPAERR("src=%d dst=%d len=%d\n",
2542 status->endp_src_idx,
2543 status->endp_dest_idx,
2544 status->pkt_len);
2545 BUG();
2546 } else {
2547 skb2->truesize = skb2->len +
2548 sizeof(struct sk_buff) +
2549 (ALIGN(len +
2550 IPA_PKT_STATUS_SIZE, 32) *
2551 unused / used_align);
2552 sys->ep->client_notify(
2553 sys->ep->priv,
2554 IPA_RECEIVE,
2555 (unsigned long)(skb2));
2556 }
2557 skb_pull(skb, len +
2558 IPA_PKT_STATUS_SIZE);
2559 }
2560 } else {
2561 IPAERR("fail to alloc skb\n");
2562 if (skb->len < len) {
2563 sys->prev_skb = NULL;
2564 sys->len_rem = len - skb->len +
2565 IPA_PKT_STATUS_SIZE;
2566 sys->len_pad = pad_len_byte;
2567 skb_pull(skb, skb->len);
2568 } else {
2569 skb_pull(skb, len +
2570 IPA_PKT_STATUS_SIZE);
2571 }
2572 }
2573 /* TX comp */
2574 ipa_wq_write_done_status(src_pipe);
2575 IPADBG("tx comp imp for %d\n", src_pipe);
2576 } else {
2577 /* TX comp */
2578 ipa_wq_write_done_status(status->endp_src_idx);
2579 IPADBG("tx comp exp for %d\n", status->endp_src_idx);
2580 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2581 IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl);
2582 IPA_STATS_DEC_CNT(
2583 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2584 }
2585	}
2586
2587 sys->free_skb(skb);
2588 return rc;
2589}
2590
2591static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb,
2592 struct sk_buff *skb, unsigned int len)
2593{
2594 struct sk_buff *skb2;
2595
2596 skb2 = skb_copy_expand(prev_skb, 0,
2597 len, GFP_KERNEL);
2598 if (likely(skb2)) {
2599 memcpy(skb_put(skb2, len),
2600 skb->data, len);
2601 } else {
2602 IPAERR("copy expand failed\n");
2603 skb2 = NULL;
2604 }
2605 dev_kfree_skb_any(prev_skb);
2606
2607 return skb2;
2608}
2609
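/*
 * Reassemble a WAN payload that was split across Rx buffers: the first
 * fragment is parked in sys->prev_skb while sys->len_rem tracks how
 * many bytes are still expected from subsequent buffers.
 */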
2610static void wan_rx_handle_splt_pyld(struct sk_buff *skb,
2611 struct ipa_sys_context *sys)
2612{
2613 struct sk_buff *skb2;
2614
2615 IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
2616 if (sys->len_rem <= skb->len) {
2617 if (sys->prev_skb) {
2618 skb2 = join_prev_skb(sys->prev_skb, skb,
2619 sys->len_rem);
2620 if (likely(skb2)) {
2621 IPADBG(
2622 "removing Status element from skb and sending to WAN client");
2623 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2624 skb2->truesize = skb2->len +
2625 sizeof(struct sk_buff);
2626 sys->ep->client_notify(sys->ep->priv,
2627 IPA_RECEIVE,
2628 (unsigned long)(skb2));
2629 }
2630 }
2631 skb_pull(skb, sys->len_rem);
2632 sys->prev_skb = NULL;
2633 sys->len_rem = 0;
2634 } else {
2635 if (sys->prev_skb) {
2636 skb2 = join_prev_skb(sys->prev_skb, skb,
2637 skb->len);
2638 sys->prev_skb = skb2;
2639 }
2640 sys->len_rem -= skb->len;
2641 skb_pull(skb, skb->len);
2642 }
2643}
2644
2645static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
2646 struct ipa_sys_context *sys)
2647{
2648 int rc = 0;
2649 struct ipa_hw_pkt_status *status;
2650 struct sk_buff *skb2;
2651 u16 pkt_len_with_pad;
2652 u32 qmap_hdr;
2653 int checksum_trailer_exists;
2654 int frame_len;
2655 int ep_idx;
2656 unsigned int used = *(unsigned int *)skb->cb;
2657 unsigned int used_align = ALIGN(used, 32);
2658 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2659
2660 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2661 if (skb->len == 0) {
2662 IPAERR("ZLT\n");
2663 goto bail;
2664 }
2665
2666 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
2667 sys->ep->client_notify(sys->ep->priv,
2668 IPA_RECEIVE, (unsigned long)(skb));
2669 return rc;
2670 }
2671 if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
2672		IPAERR("Recycle should be enabled only with GRO Aggr\n");
2673 ipa_assert();
2674 }
2675 /*
2676 * payload splits across 2 buff or more,
2677 * take the start of the payload from prev_skb
2678 */
2679 if (sys->len_rem)
2680 wan_rx_handle_splt_pyld(skb, sys);
2681
2682
2683 while (skb->len) {
2684 IPADBG("LEN_REM %d\n", skb->len);
2685 if (skb->len < IPA_PKT_STATUS_SIZE) {
2686 IPAERR("status straddles buffer\n");
2687 WARN_ON(1);
2688 goto bail;
2689 }
2690 status = (struct ipa_hw_pkt_status *)skb->data;
2691 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2692 status->status_opcode, status->endp_src_idx,
2693 status->endp_dest_idx, status->pkt_len);
2694
2695 if (sys->status_stat) {
2696 sys->status_stat->status[sys->status_stat->curr] =
2697 *status;
2698 sys->status_stat->curr++;
2699 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2700 sys->status_stat->curr = 0;
2701 }
2702
2703 if (status->status_opcode !=
2704 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2705 status->status_opcode !=
2706 IPA_HW_STATUS_OPCODE_PACKET &&
2707 status->status_opcode !=
2708 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2709 IPAERR("unsupported opcode\n");
2710 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2711 continue;
2712 }
2713 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2714 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2715 status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
2716 status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
2717 IPAERR("status fields invalid\n");
2718 WARN_ON(1);
2719 goto bail;
2720 }
2721 if (status->pkt_len == 0) {
2722 IPADBG("Skip aggr close status\n");
2723 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2724 IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts);
2725 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close);
2726 continue;
2727 }
2728 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
2729 if (status->endp_dest_idx != ep_idx) {
2730 IPAERR("expected endp_dest_idx %d received %d\n",
2731 ep_idx, status->endp_dest_idx);
2732 WARN_ON(1);
2733 goto bail;
2734 }
2735 /* RX data */
2736 if (skb->len == IPA_PKT_STATUS_SIZE) {
2737 IPAERR("Ins header in next buffer\n");
2738 WARN_ON(1);
2739 goto bail;
2740 }
2741 qmap_hdr = *(u32 *)(status+1);
2742 /*
2743 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
2744 * header
2745 */
2746
2747 /*QMAP is BE: convert the pkt_len field from BE to LE*/
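		/* on a little-endian CPU bytes 2-3 of the QMAP header land
		 * in the upper half of qmap_hdr, hence the shift before the
		 * byte swap
		 */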
2748 pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
2749 IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
2750 /*get the CHECKSUM_PROCESS bit*/
2751 checksum_trailer_exists = status->status_mask &
2752 IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
2753 IPADBG("checksum_trailer_exists %d\n",
2754 checksum_trailer_exists);
2755
2756 frame_len = IPA_PKT_STATUS_SIZE +
2757 IPA_QMAP_HEADER_LENGTH +
2758 pkt_len_with_pad;
2759 if (checksum_trailer_exists)
2760 frame_len += IPA_DL_CHECKSUM_LENGTH;
2761 IPADBG("frame_len %d\n", frame_len);
2762
2763 skb2 = skb_clone(skb, GFP_KERNEL);
2764 if (likely(skb2)) {
2765 /*
2766 * the len of actual data is smaller than expected
2767 * payload split across 2 buff
2768 */
2769 if (skb->len < frame_len) {
2770 IPADBG("SPL skb len %d len %d\n",
2771 skb->len, frame_len);
2772 sys->prev_skb = skb2;
2773 sys->len_rem = frame_len - skb->len;
2774 skb_pull(skb, skb->len);
2775 } else {
2776 skb_trim(skb2, frame_len);
2777 IPADBG("rx avail for %d\n",
2778 status->endp_dest_idx);
2779 IPADBG(
2780 "removing Status element from skb and sending to WAN client");
2781 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2782 skb2->truesize = skb2->len +
2783 sizeof(struct sk_buff) +
2784 (ALIGN(frame_len, 32) *
2785 unused / used_align);
2786 sys->ep->client_notify(sys->ep->priv,
2787 IPA_RECEIVE, (unsigned long)(skb2));
2788 skb_pull(skb, frame_len);
2789 }
2790 } else {
2791 IPAERR("fail to clone\n");
2792 if (skb->len < frame_len) {
2793 sys->prev_skb = NULL;
2794 sys->len_rem = frame_len - skb->len;
2795 skb_pull(skb, skb->len);
2796 } else {
2797 skb_pull(skb, frame_len);
2798 }
2799 }
2800	}
2801bail:
2802 sys->free_skb(skb);
2803 return rc;
2804}
2805
2806static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys)
2807{
2808 struct ipa_a5_mux_hdr *mux_hdr;
2809 unsigned int pull_len;
2810 unsigned int padding;
2811 struct ipa_ep_context *ep;
2812 unsigned int src_pipe;
2813
2814 mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
2815
2816 src_pipe = mux_hdr->src_pipe_index;
2817
2818 IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
2819 rx_skb->len, ntohs(mux_hdr->interface_id),
2820 src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));
2821
2822 IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
2823
2824 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2825 IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
2826
2827 /*
2828 * Any packets arriving over AMPDU_TX should be dispatched
2829 * to the regular WLAN RX data-path.
2830 */
2831 if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
2832 src_pipe = WLAN_PROD_TX_EP;
2833
2834 ep = &ipa_ctx->ep[src_pipe];
2835 spin_lock(&ipa_ctx->disconnect_lock);
2836 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2837 !ep->valid || !ep->client_notify)) {
2838 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2839 src_pipe, ep->valid, ep->client_notify);
2840 dev_kfree_skb_any(rx_skb);
2841 spin_unlock(&ipa_ctx->disconnect_lock);
2842 return 0;
2843 }
2844
2845 pull_len = sizeof(struct ipa_a5_mux_hdr);
2846
2847 /*
2848 * IP packet starts on word boundary
2849 * remove the MUX header and any padding and pass the frame to
2850 * the client which registered a rx callback on the "src pipe"
2851 */
2852 padding = ep->cfg.hdr.hdr_len & 0x3;
2853 if (padding)
2854 pull_len += 4 - padding;
2855
2856 IPADBG("pulling %d bytes from skb\n", pull_len);
2857 skb_pull(rx_skb, pull_len);
2858 ep->client_notify(ep->priv, IPA_RECEIVE,
2859 (unsigned long)(rx_skb));
2860 spin_unlock(&ipa_ctx->disconnect_lock);
2861 return 0;
2862}
2863
2864static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags)
2865{
2866 return __dev_alloc_skb(len, flags);
2867}
2868
2869static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len,
2870 gfp_t flags)
2871{
2872 struct sk_buff *skb;
2873
2874 skb = __dev_alloc_skb(len + IPA_HEADROOM, flags);
2875 if (skb)
2876 skb_reserve(skb, IPA_HEADROOM);
2877
2878 return skb;
2879}
2880
2881static void ipa_free_skb_rx(struct sk_buff *skb)
2882{
2883 dev_kfree_skb_any(skb);
2884}
2885
2886void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
2887{
2888 struct sk_buff *rx_skb = (struct sk_buff *)data;
2889 struct ipa_hw_pkt_status *status;
2890 struct ipa_ep_context *ep;
2891 unsigned int src_pipe;
2892 u32 metadata;
2893
2894 status = (struct ipa_hw_pkt_status *)rx_skb->data;
2895 src_pipe = status->endp_src_idx;
2896 metadata = status->metadata;
2897 ep = &ipa_ctx->ep[src_pipe];
2898 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2899 !ep->valid ||
2900 !ep->client_notify)) {
2901 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2902 src_pipe, ep->valid, ep->client_notify);
2903 dev_kfree_skb_any(rx_skb);
2904 return;
2905 }
2906 if (!status->exception)
2907 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE +
2908 IPA_LAN_RX_HEADER_LENGTH);
2909 else
2910 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE);
2911
2912 /*
2913 * Metadata Info
2914 * ------------------------------------------
2915 * | 3 | 2 | 1 | 0 |
2916 * | fw_desc | vdev_id | qmap mux id | Resv |
2917 * ------------------------------------------
2918 */
2919 *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
2920 IPADBG("meta_data: 0x%x cb: 0x%x\n",
2921 metadata, *(u32 *)rx_skb->cb);
2922
2923 ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
2924}
2925
2926void ipa2_recycle_wan_skb(struct sk_buff *skb)
2927{
2928 struct ipa_rx_pkt_wrapper *rx_pkt;
2929 int ep_idx = ipa2_get_ep_mapping(
2930 IPA_CLIENT_APPS_WAN_CONS);
2931 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2932 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2933
2934 if (unlikely(ep_idx == -1)) {
2935 IPAERR("dest EP does not exist\n");
2936 ipa_assert();
2937 }
2938
2939 rx_pkt = kmem_cache_zalloc(
2940 ipa_ctx->rx_pkt_wrapper_cache, flag);
2941 if (!rx_pkt)
2942 ipa_assert();
2943
2944 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2945 rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
2946
2947 rx_pkt->data.skb = skb;
2948 rx_pkt->data.dma_addr = 0;
2949 ipa_skb_recycle(rx_pkt->data.skb);
2950 skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
2951 INIT_LIST_HEAD(&rx_pkt->link);
2952 spin_lock_bh(&rx_pkt->sys->spinlock);
2953 list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
2954 spin_unlock_bh(&rx_pkt->sys->spinlock);
2955}
2956
2957static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2958{
2959 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2960 struct sk_buff *rx_skb;
2961
2962 if (unlikely(list_empty(&sys->head_desc_list))) {
2963 WARN_ON(1);
2964 return;
2965 }
2966 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2967 struct ipa_rx_pkt_wrapper,
2968 link);
2969 list_del(&rx_pkt_expected->link);
2970 sys->len--;
2971 if (size)
2972 rx_pkt_expected->len = size;
2973 rx_skb = rx_pkt_expected->data.skb;
2974 dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
2975 sys->rx_buff_sz, DMA_FROM_DEVICE);
2976 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2977 rx_skb->len = rx_pkt_expected->len;
2978 *(unsigned int *)rx_skb->cb = rx_skb->len;
2979 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2980 sys->pyld_hdlr(rx_skb, sys);
2981 sys->repl_hdlr(sys);
2982 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
2983
2984}
2985
2986static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2987{
2988 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2989 struct sk_buff *rx_skb;
2990
2991 if (unlikely(list_empty(&sys->head_desc_list))) {
2992 WARN_ON(1);
2993 return;
2994 }
2995 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2996 struct ipa_rx_pkt_wrapper,
2997 link);
2998 list_del(&rx_pkt_expected->link);
2999 sys->len--;
3000
3001 if (size)
3002 rx_pkt_expected->len = size;
3003
3004 rx_skb = rx_pkt_expected->data.skb;
3005 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
3006 rx_skb->len = rx_pkt_expected->len;
3007 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
3008 sys->ep->wstats.tx_pkts_rcvd++;
3009 if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
3010 ipa2_free_skb(&rx_pkt_expected->data);
3011 sys->ep->wstats.tx_pkts_dropped++;
3012 } else {
3013 sys->ep->wstats.tx_pkts_sent++;
3014 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3015 (unsigned long)(&rx_pkt_expected->data));
3016 }
3017 ipa_replenish_wlan_rx_cache(sys);
3018}
3019
3020static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
3021 struct sps_iovec *iovec)
3022{
3023 IPADBG("ENTER.\n");
3024 if (unlikely(list_empty(&sys->head_desc_list))) {
3025 IPAERR("descriptor list is empty!\n");
3026 WARN_ON(1);
3027 return;
3028 }
3029 if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) {
3030 IPAERR("received unexpected event. sps flag is 0x%x\n"
3031 , iovec->flags);
3032 WARN_ON(1);
3033 return;
3034 }
3035 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3036 (unsigned long)(iovec));
3037 IPADBG("EXIT\n");
3038}
3039
3040static void ipa_wq_rx_avail(struct work_struct *work)
3041{
3042 struct ipa_rx_pkt_wrapper *rx_pkt;
3043 struct ipa_sys_context *sys;
3044
3045 rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work);
3046	if (WARN_ON(rx_pkt == NULL))
3047		return;
3048 sys = rx_pkt->sys;
3049 ipa_wq_rx_common(sys, 0);
3050}
3051
3052/**
3053 * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
3054 * the SPS driver after a Rx operation is complete.
3055 * Called in an interrupt context.
3056 * @notify: SPS driver supplied notification struct
3057 *
3058 * This function defers the work for this event to a workqueue.
3059 */
3060void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
3061{
3062 struct ipa_rx_pkt_wrapper *rx_pkt;
3063
3064 switch (notify->event_id) {
3065 case SPS_EVENT_EOT:
3066 rx_pkt = notify->data.transfer.user;
3067 if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
3068 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
3069 rx_pkt->len = notify->data.transfer.iovec.size;
3070 IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
3071 notify->user, rx_pkt->len);
3072 queue_work(rx_pkt->sys->wq, &rx_pkt->work);
3073 break;
3074 default:
3075 IPAERR("received unexpected event id %d sys=%p\n",
3076 notify->event_id, notify->user);
3077 }
3078}
3079
3080static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
3081 struct ipa_sys_context *sys)
3082{
3083 if (sys->ep->client_notify) {
3084 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3085 (unsigned long)(rx_skb));
3086 } else {
3087 dev_kfree_skb_any(rx_skb);
3088 WARN_ON(1);
3089 }
3090
3091 return 0;
3092}
3093
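/*
 * Per-client policy selection for IPA v2 hardware: producers run in
 * interrupt or no-interrupt mode, while the APPS LAN/WAN consumers use
 * the interrupt-plus-polling scheme with generic aggregation and one of
 * the replenish handlers above (the fast ring on SMP, plain allocation
 * on UP, recycling when NAPI and skb recycling are both enabled).
 */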
3094static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
3095 struct ipa_sys_context *sys)
3096{
3097 unsigned long int aggr_byte_limit;
3098
3099 sys->ep->status.status_en = true;
3100 sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
3101 if (IPA_CLIENT_IS_PROD(in->client)) {
3102 if (!sys->ep->skip_ep_cfg) {
3103 sys->policy = IPA_POLICY_NOINTR_MODE;
3104 sys->sps_option = SPS_O_AUTO_ENABLE;
3105 sys->sps_callback = NULL;
3106 sys->ep->status.status_ep = ipa2_get_ep_mapping(
3107 IPA_CLIENT_APPS_LAN_CONS);
3108 if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
3109 sys->ep->status.status_en = false;
3110 } else {
3111 sys->policy = IPA_POLICY_INTR_MODE;
3112 sys->sps_option = (SPS_O_AUTO_ENABLE |
3113 SPS_O_EOT);
3114 sys->sps_callback =
3115 ipa_sps_irq_tx_no_aggr_notify;
3116 }
3117 return 0;
3118 }
3119
3120 aggr_byte_limit =
3121 (unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
3122 ipa_adjust_ra_buff_base_sz(
3123 in->ipa_ep_cfg.aggr.aggr_byte_limit));
3124
3125 if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
3126 in->client == IPA_CLIENT_APPS_WAN_CONS) {
3127 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3128 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3129 | SPS_O_ACK_TRANSFERS);
3130 sys->sps_callback = ipa_sps_irq_rx_notify;
3131 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3132 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3133 switch_to_intr_rx_work_func);
3134 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3135 replenish_rx_work_func);
3136 INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
3137 atomic_set(&sys->curr_polling_state, 0);
3138 sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
3139 IPA_GENERIC_RX_BUFF_BASE_SZ) -
3140 IPA_HEADROOM;
3141 sys->get_skb = ipa_get_skb_ipa_rx_headroom;
3142 sys->free_skb = ipa_free_skb_rx;
3143 in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
3144 in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
3145 in->ipa_ep_cfg.aggr.aggr_time_limit =
3146 IPA_GENERIC_AGGR_TIME_LIMIT;
3147 if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3148 sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
3149			sys->rx_pool_sz =
3150				ipa_ctx->lan_rx_ring_size;
3151			if (nr_cpu_ids > 1) {
3152 sys->repl_hdlr =
3153 ipa_fast_replenish_rx_cache;
3154 sys->repl_trig_thresh =
3155 sys->rx_pool_sz / 8;
3156 } else {
3157 sys->repl_hdlr =
3158 ipa_replenish_rx_cache;
3159 }
3160			in->ipa_ep_cfg.aggr.aggr_byte_limit =
3161 IPA_GENERIC_AGGR_BYTE_LIMIT;
3162 in->ipa_ep_cfg.aggr.aggr_pkt_limit =
3163 IPA_GENERIC_AGGR_PKT_LIMIT;
3164 sys->ep->wakelock_client =
3165 IPA_WAKELOCK_REF_CLIENT_LAN_RX;
3166 } else if (in->client ==
3167 IPA_CLIENT_APPS_WAN_CONS) {
3168 sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
3169			sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
3170			if (nr_cpu_ids > 1) {
3171				sys->repl_hdlr =
3172					ipa_fast_replenish_rx_cache;
3173				sys->repl_trig_thresh =
3174					sys->rx_pool_sz / 8;
3175			} else {
3176				sys->repl_hdlr =
3177					ipa_replenish_rx_cache;
3178			}
3179			if (in->napi_enabled) {
3180				sys->rx_pool_sz =
3181					IPA_WAN_NAPI_CONS_RX_POOL_SZ;
3182				if (in->recycle_enabled) {
3183					sys->repl_hdlr =
3184					ipa_replenish_rx_cache_recycle;
3185				}
3186			}
3187 sys->ep->wakelock_client =
3188 IPA_WAKELOCK_REF_CLIENT_WAN_RX;
3189 in->ipa_ep_cfg.aggr.aggr_sw_eof_active
3190 = true;
3191 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
3192				IPAERR("get close-by %u\n",
3193					ipa_adjust_ra_buff_base_sz(
3194					in->ipa_ep_cfg.aggr.aggr_byte_limit));
3195				IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
3196				/* disable ipa_status */
3197				sys->ep->status.status_en = false;
3198				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
3199					ipa_adjust_ra_buff_base_sz(
3200					in->ipa_ep_cfg.aggr.aggr_byte_limit -
3201					IPA_HEADROOM));
3202				in->ipa_ep_cfg.aggr.aggr_byte_limit =
3203					sys->rx_buff_sz <
3204					in->ipa_ep_cfg.aggr.aggr_byte_limit ?
3205					IPA_ADJUST_AGGR_BYTE_LIMIT(
3206					sys->rx_buff_sz) :
3207					IPA_ADJUST_AGGR_BYTE_LIMIT(
3208					in->ipa_ep_cfg.aggr.aggr_byte_limit);
3209				IPAERR("set aggr_limit %lu\n",
3210					(unsigned long)
3211					in->ipa_ep_cfg.aggr.aggr_byte_limit);
3212			} else {
3213				in->ipa_ep_cfg.aggr.aggr_byte_limit =
3214					IPA_GENERIC_AGGR_BYTE_LIMIT;
3215				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
3216					IPA_GENERIC_AGGR_PKT_LIMIT;
3217			}
3226 }
3227 } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
3228 IPADBG("assigning policy to client:%d",
3229 in->client);
3230
3231 sys->ep->status.status_en = false;
3232 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3233 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3234 | SPS_O_ACK_TRANSFERS);
3235 sys->sps_callback = ipa_sps_irq_rx_notify;
3236 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3237 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3238 switch_to_intr_rx_work_func);
3239 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3240 replenish_rx_work_func);
3241 atomic_set(&sys->curr_polling_state, 0);
3242 sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
3243 sys->rx_pool_sz = in->desc_fifo_sz /
3244 sizeof(struct sps_iovec) - 1;
3245 if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
3246 sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
3247 sys->pyld_hdlr = NULL;
3248 sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
3249 sys->get_skb = ipa_get_skb_ipa_rx;
3250 sys->free_skb = ipa_free_skb_rx;
3251 in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
3252 sys->ep->wakelock_client =
3253 IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
3254 } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
3255 IPADBG("assigning policy to client:%d",
3256 in->client);
3257
3258 sys->ep->status.status_en = false;
3259 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3260 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3261 | SPS_O_ACK_TRANSFERS);
3262 sys->sps_callback = ipa_sps_irq_rx_notify;
3263 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3264 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3265 switch_to_intr_rx_work_func);
3266 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3267 replenish_rx_work_func);
3268 atomic_set(&sys->curr_polling_state, 0);
3269 sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
3270 sys->rx_pool_sz = in->desc_fifo_sz /
3271 sizeof(struct sps_iovec) - 1;
3272 if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
3273 sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
3274 sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
3275 sys->get_skb = ipa_get_skb_ipa_rx;
3276 sys->free_skb = ipa_free_skb_rx;
3277 sys->repl_hdlr = ipa_replenish_rx_cache;
3278 sys->ep->wakelock_client =
3279 IPA_WAKELOCK_REF_CLIENT_ODU_RX;
3280 } else if (in->client ==
3281 IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
3282 IPADBG("assigning policy to client:%d",
3283 in->client);
3284 sys->ep->status.status_en = false;
3285 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3286 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3287 | SPS_O_ACK_TRANSFERS);
3288 sys->sps_callback = ipa_sps_irq_rx_notify;
3289 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3290 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3291 switch_to_intr_rx_work_func);
3292 } else if (in->client ==
3293 IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
3294 IPADBG("assigning policy to client:%d",
3295 in->client);
3296 sys->ep->status.status_en = false;
3297 sys->policy = IPA_POLICY_NOINTR_MODE;
3298 sys->sps_option = SPS_O_AUTO_ENABLE |
3299 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
3300 } else {
3301 IPAERR("Need to install a RX pipe hdlr\n");
3302 WARN_ON(1);
3303 return -EINVAL;
3304 }
3305 return 0;
3306}
3307
3308static int ipa_assign_policy(struct ipa_sys_connect_params *in,
3309 struct ipa_sys_context *sys)
3310{
3311 if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
3312 sys->policy = IPA_POLICY_INTR_MODE;
3313 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3314 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3315 return 0;
3316 }
3317
3318 if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
3319 if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) {
3320 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3321 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3322 SPS_O_ACK_TRANSFERS);
3323 sys->sps_callback = ipa_sps_irq_tx_notify;
3324 INIT_WORK(&sys->work, ipa_wq_handle_tx);
3325 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3326 switch_to_intr_tx_work_func);
3327 atomic_set(&sys->curr_polling_state, 0);
3328 } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3329 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3330 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3331 SPS_O_ACK_TRANSFERS);
3332 sys->sps_callback = ipa_sps_irq_rx_notify;
3333 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3334 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3335 switch_to_intr_rx_work_func);
3336 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3337 replenish_rx_work_func);
3338 atomic_set(&sys->curr_polling_state, 0);
3339 sys->rx_buff_sz = IPA_RX_SKB_SIZE;
3340 sys->rx_pool_sz = IPA_RX_POOL_CEIL;
3341 sys->pyld_hdlr = ipa_rx_pyld_hdlr;
3342 sys->get_skb = ipa_get_skb_ipa_rx;
3343 sys->free_skb = ipa_free_skb_rx;
3344 sys->repl_hdlr = ipa_replenish_rx_cache;
3345 } else if (IPA_CLIENT_IS_PROD(in->client)) {
3346 sys->policy = IPA_POLICY_INTR_MODE;
3347 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3348 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3349 } else {
3350 IPAERR("Need to install a RX pipe hdlr\n");
3351 WARN_ON(1);
3352 return -EINVAL;
3353 }
3354
3355 return 0;
3356 } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3357 return ipa_assign_policy_v2(in, sys);
3358
3359 IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
3360 WARN_ON(1);
3361 return -EINVAL;
3362}
3363
3364/**
3365 * ipa_tx_client_rx_notify_release() - Callback function
3366 * which will call the user supplied callback function to
3367 * release the skb, or release it on its own if no callback
3368 * function was supplied
3369 *
3370 * @user1: [in] - Data Descriptor
3371 * @user2: [in] - endpoint idx
3372 *
3373 * This notification callback is for the destination client
3374 * This function is supplied in ipa_tx_dp_mul
3375 */
3376static void ipa_tx_client_rx_notify_release(void *user1, int user2)
3377{
3378 struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
3379 int ep_idx = user2;
3380
3381 IPADBG("Received data desc anchor:%p\n", dd);
3382
3383 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3384 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3385
3386 /* wlan host driver waits till tx complete before unload */
3387 IPADBG("ep=%d fifo_desc_free_count=%d\n",
3388 ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc));
3389 IPADBG("calling client notify callback with priv:%p\n",
3390 ipa_ctx->ep[ep_idx].priv);
3391
3392 if (ipa_ctx->ep[ep_idx].client_notify) {
3393 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
3394 IPA_WRITE_DONE, (unsigned long)user1);
3395 ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++;
3396 }
3397}
3398/**
3399 * ipa_tx_client_rx_pkt_status() - Callback function
3400 * which increases the number of available fifo descriptors
3401 * for the endpoint
3402 *
3403 * @user1: [in] - Data Descriptor
3404 * @user2: [in] - endpoint idx
3405 *
3406 * This notification callback is for the destination client
3407 * This function is supplied in ipa_tx_dp_mul
3408 */
3409static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
3410{
3411 int ep_idx = user2;
3412
3413 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3414 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3415}
3416
3417
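/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 * build a list of struct ipa_tx_data_desc entries anchored at a head
 * descriptor, then call ipa2_tx_dp_mul(IPA_CLIENT_WLAN1_PROD, head).
 * One SPS transfer is issued per entry and the IPA_WRITE_DONE notify
 * fires for the anchor once the last transfer completes.
 */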
3418/**
3419 * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
3420 * @src: [in] - Client that is sending data
3421 * @ipa_tx_data_desc: [in] data descriptors from wlan
3422 *
3423 * This is used to transfer data descriptors received from the
3424 * WLAN1_PROD pipe to the IPA HW.
3425 *
3426 * The function sends the data descriptors from WLAN1_PROD one at
3427 * a time using sps_transfer_one and sets the EOT flag on the last
3428 * descriptor. Once the send is done from the SPS point-of-view,
3429 * the IPA driver is notified through the supplied callback -
3430 * ipa_sps_irq_tx_no_aggr_notify()
3431 *
3432 * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied
3433 * callback (from ipa_connect)
3434 *
3435 * Returns: 0 on success, negative on failure
3436 */
3437int ipa2_tx_dp_mul(enum ipa_client_type src,
3438 struct ipa_tx_data_desc *data_desc)
3439{
3440 /* The second byte in wlan header holds qmap id */
3441#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
3442 struct ipa_tx_data_desc *entry;
3443 struct ipa_sys_context *sys;
3444 struct ipa_desc desc = { 0 };
3445 u32 num_desc, cnt;
3446 int ep_idx;
3447
3448 if (unlikely(!ipa_ctx)) {
3449 IPAERR("IPA driver was not initialized\n");
3450 return -EINVAL;
3451 }
3452
3453 IPADBG("Received data desc anchor:%p\n", data_desc);
3454
3455 spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3456
3457 ep_idx = ipa2_get_ep_mapping(src);
3458 if (unlikely(ep_idx == -1)) {
3459 IPAERR("dest EP does not exist.\n");
3460 goto fail_send;
3461 }
3462 IPADBG("ep idx:%d\n", ep_idx);
3463 sys = ipa_ctx->ep[ep_idx].sys;
3464
3465 if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
3466 IPAERR("dest EP not valid.\n");
3467 goto fail_send;
3468 }
3469 sys->ep->wstats.rx_hd_rcvd++;
3470
3471 /* Calculate the number of descriptors */
3472 num_desc = 0;
3473 list_for_each_entry(entry, &data_desc->link, link) {
3474 num_desc++;
3475 }
3476 IPADBG("Number of Data Descriptors:%d", num_desc);
3477
3478 if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
3479 IPAERR("Insufficient data descriptors available\n");
3480 goto fail_send;
3481 }
3482
3483 /* Assign callback only for last data descriptor */
3484 cnt = 0;
3485 list_for_each_entry(entry, &data_desc->link, link) {
3486 IPADBG("Parsing data desc :%d\n", cnt);
3487 cnt++;
3488 ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
3489 (u8)sys->ep->cfg.meta.qmap_id;
3490 desc.pyld = entry->pyld_buffer;
3491 desc.len = entry->pyld_len;
3492 desc.type = IPA_DATA_DESC_SKB;
3493 desc.user1 = data_desc;
3494 desc.user2 = ep_idx;
3495 IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
3496 entry->priv, desc.pyld, desc.len);
3497
3498 /* In case of last descriptor populate callback */
3499 if (cnt == num_desc) {
3500 IPADBG("data desc:%p\n", data_desc);
3501 desc.callback = ipa_tx_client_rx_notify_release;
3502 } else {
3503 desc.callback = ipa_tx_client_rx_pkt_status;
3504 }
3505
3506 IPADBG("calling ipa_send_one()\n");
3507 if (ipa_send_one(sys, &desc, true)) {
3508 IPAERR("fail to send skb\n");
3509 sys->ep->wstats.rx_pkt_leak += (cnt-1);
3510 sys->ep->wstats.rx_dp_fail++;
3511 goto fail_send;
3512 }
3513
3514 if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
3515 atomic_dec(&sys->ep->avail_fifo_desc);
3516
3517 sys->ep->wstats.rx_pkts_rcvd++;
3518 IPADBG("ep=%d fifo desc=%d\n",
3519 ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
3520 }
3521
3522 sys->ep->wstats.rx_hd_processed++;
3523 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3524 return 0;
3525
3526fail_send:
3527 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3528 return -EFAULT;
3529
3530}
3531
3532void ipa2_free_skb(struct ipa_rx_data *data)
3533{
3534 struct ipa_rx_pkt_wrapper *rx_pkt;
3535
3536 if (unlikely(!ipa_ctx)) {
3537 IPAERR("IPA driver was not initialized\n");
3538 return;
3539 }
3540
3541 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3542
3543 ipa_ctx->wc_memb.total_tx_pkts_freed++;
3544 rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data);
3545
3546 ipa_skb_recycle(rx_pkt->data.skb);
3547 (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
3548
3549 list_add_tail(&rx_pkt->link,
3550 &ipa_ctx->wc_memb.wlan_comm_desc_list);
3551 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
3552
3553 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3554}
3555
3556
3557/* Functions added to support kernel tests */
3558
3559int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
3560 unsigned long *ipa_bam_hdl,
3561 u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
3562{
3563 struct ipa_ep_context *ep;
3564 int ipa_ep_idx;
3565 int result = -EINVAL;
3566
3567 if (sys_in == NULL || clnt_hdl == NULL) {
3568 IPAERR("NULL args\n");
3569 goto fail_gen;
3570 }
3571
3572 if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) {
3573 IPAERR("NULL args\n");
3574 goto fail_gen;
3575 }
3576 if (sys_in->client >= IPA_CLIENT_MAX) {
3577 IPAERR("bad parm client:%d\n", sys_in->client);
3578 goto fail_gen;
3579 }
3580
3581 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
3582 if (ipa_ep_idx == -1) {
3583 IPAERR("Invalid client :%d\n", sys_in->client);
3584 goto fail_gen;
3585 }
3586
3587 ep = &ipa_ctx->ep[ipa_ep_idx];
3588
3589 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
3590
3591 if (ep->valid == 1) {
3592 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
3593 IPAERR("EP %d already allocated\n", ipa_ep_idx);
3594 goto fail_and_disable_clocks;
3595 } else {
3596 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
3597 &sys_in->ipa_ep_cfg.hdr)) {
3598 IPAERR("fail to configure hdr prop of EP %d\n",
3599 ipa_ep_idx);
3600 result = -EFAULT;
3601 goto fail_and_disable_clocks;
3602 }
3603 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
3604 &sys_in->ipa_ep_cfg.cfg)) {
3605 IPAERR("fail to configure cfg prop of EP %d\n",
3606 ipa_ep_idx);
3607 result = -EFAULT;
3608 goto fail_and_disable_clocks;
3609 }
3610 IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
3611 sys_in->client, ipa_ep_idx, ep->sys);
3612 ep->client_notify = sys_in->notify;
3613 ep->priv = sys_in->priv;
3614 *clnt_hdl = ipa_ep_idx;
3615 if (!ep->keep_ipa_awake)
3616 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3617
3618 return 0;
3619 }
3620 }
3621
3622 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
3623
3624 ep->valid = 1;
3625 ep->client = sys_in->client;
3626 ep->client_notify = sys_in->notify;
3627 ep->priv = sys_in->priv;
3628 ep->keep_ipa_awake = true;
3629
3630 result = ipa_enable_data_path(ipa_ep_idx);
3631 if (result) {
3632 IPAERR("enable data path failed res=%d clnt=%d.\n",
3633 result, ipa_ep_idx);
3634 goto fail_gen2;
3635 }
3636
3637 if (!ep->skip_ep_cfg) {
3638 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
3639 IPAERR("fail to configure EP.\n");
3640 goto fail_gen2;
3641 }
3642 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
3643 IPAERR("fail to configure status of EP.\n");
3644 goto fail_gen2;
3645 }
3646 IPADBG("ep configuration successful\n");
3647 } else {
3648 IPADBG("skipping ep configuration\n");
3649 }
3650
3651 *clnt_hdl = ipa_ep_idx;
3652
3653 *ipa_pipe_num = ipa_ep_idx;
3654 *ipa_bam_hdl = ipa_ctx->bam_handle;
3655
3656 if (!ep->keep_ipa_awake)
3657 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3658
3659 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
3660 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
3661 ipa_ep_idx, ep->sys);
3662
3663 return 0;
3664
3665fail_gen2:
3666fail_and_disable_clocks:
3667 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3668fail_gen:
3669 return result;
3670}
3671
3672int ipa2_sys_teardown(u32 clnt_hdl)
3673{
3674 struct ipa_ep_context *ep;
3675
3676 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3677 ipa_ctx->ep[clnt_hdl].valid == 0) {
3678 IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
3679 return -EINVAL;
3680 }
3681
3682 ep = &ipa_ctx->ep[clnt_hdl];
3683
3684 if (!ep->keep_ipa_awake)
3685 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3686
3687 ipa_disable_data_path(clnt_hdl);
3688 ep->valid = 0;
3689
3690 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3691
3692 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
3693
3694 return 0;
3695}
3696
3697int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
3698 unsigned long gsi_ev_hdl)
3699{
3700	IPAERR("GSI not supported in IPAv2\n");
3701 return -EFAULT;
3702}
3703
3704
3705/**
3706 * ipa_adjust_ra_buff_base_sz()
3707 *
3708 * Return value: the largest power of two smaller than the input
3709 * value plus IPA_MTU and IPA_GENERIC_RX_BUFF_LIMIT
3710 */
3711static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
3712{
3713 aggr_byte_limit += IPA_MTU;
3714 aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
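	/* round up to the next power of two by bit smearing, then halve;
	 * e.g. an adjusted limit of 10000 rounds up to 16384 and 8192
	 * is returned
	 */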
3715 aggr_byte_limit--;
3716 aggr_byte_limit |= aggr_byte_limit >> 1;
3717 aggr_byte_limit |= aggr_byte_limit >> 2;
3718 aggr_byte_limit |= aggr_byte_limit >> 4;
3719 aggr_byte_limit |= aggr_byte_limit >> 8;
3720 aggr_byte_limit |= aggr_byte_limit >> 16;
3721 aggr_byte_limit++;
3722 return aggr_byte_limit >> 1;
3723}