1/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/delay.h>
14#include <linux/device.h>
15#include <linux/dmapool.h>
16#include <linux/list.h>
17#include <linux/netdevice.h>
18#include "ipa_i.h"
19#include "ipa_trace.h"
20
21#define IPA_WAN_AGGR_PKT_CNT 5
22#define IPA_LAST_DESC_CNT 0xFFFF
23#define POLLING_INACTIVITY_RX 40
24#define POLLING_INACTIVITY_TX 40
25#define POLLING_MIN_SLEEP_TX 400
26#define POLLING_MAX_SLEEP_TX 500
27/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
28#define IPA_MTU 1500
29#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
30#define IPA_GENERIC_AGGR_TIME_LIMIT 1
31#define IPA_GENERIC_AGGR_PKT_LIMIT 0
32
33#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
34#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
35 (X) + NET_SKB_PAD) +\
36 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
37#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
38 (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
39#define IPA_GENERIC_RX_BUFF_LIMIT (\
40 IPA_REAL_GENERIC_RX_BUFF_SZ(\
41 IPA_GENERIC_RX_BUFF_BASE_SZ) -\
42 IPA_GENERIC_RX_BUFF_BASE_SZ)
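/*
 * Sizing walk-through (approximate; exact numbers depend on kernel config
 * such as NET_SKB_PAD and SMP_CACHE_BYTES): IPA_REAL_GENERIC_RX_BUFF_SZ(X)
 * estimates what the skb allocator really consumes for a payload of X bytes,
 * i.e. X plus the NET_SKB_PAD headroom plus the struct skb_shared_info tail,
 * each cache-aligned. IPA_GENERIC_RX_BUFF_SZ() therefore shrinks the
 * requested payload by that estimated overhead so the real allocation fits
 * within the 8 KB base, and IPA_GENERIC_RX_BUFF_LIMIT is that overhead
 * itself.
 */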
43
44#define IPA_RX_BUFF_CLIENT_HEADROOM 256
45
46/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
47#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
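/*
 * Worked example of the adjustment above, assuming the 8 KB generic rx
 * buffer: (8192 - 1500) / 1000 = 6, which is exactly
 * IPA_GENERIC_AGGR_BYTE_LIMIT. The idea, per the comment above, is that
 * subtracting one nominal MTU leaves room in the buffer for the packet that
 * finally pushes aggregation past the byte limit.
 */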
48
49#define IPA_WLAN_RX_POOL_SZ 100
50#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
51#define IPA_WLAN_RX_BUFF_SZ 2048
52#define IPA_WLAN_COMM_RX_POOL_LOW 100
53#define IPA_WLAN_COMM_RX_POOL_HIGH 900
54
55#define IPA_ODU_RX_BUFF_SZ 2048
56#define IPA_ODU_RX_POOL_SZ 32
57#define IPA_SIZE_DL_CSUM_META_TRAILER 8
58
59#define IPA_HEADROOM 128
60
61static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags);
62static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys);
63static void ipa_replenish_rx_cache(struct ipa_sys_context *sys);
64static void replenish_rx_work_func(struct work_struct *work);
65static void ipa_wq_handle_rx(struct work_struct *work);
66static void ipa_wq_handle_tx(struct work_struct *work);
67static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size);
68static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys,
69 u32 size);
70static int ipa_assign_policy(struct ipa_sys_connect_params *in,
71 struct ipa_sys_context *sys);
72static void ipa_cleanup_rx(struct ipa_sys_context *sys);
73static void ipa_wq_rx_avail(struct work_struct *work);
74static void ipa_alloc_wlan_rx_common_cache(u32 size);
75static void ipa_cleanup_wlan_rx_common_cache(void);
76static void ipa_wq_repl_rx(struct work_struct *work);
77static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
78 struct sps_iovec *iovec);
79
80static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
81static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
82
83static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
84{
85 struct ipa_tx_pkt_wrapper *tx_pkt_expected;
86 int i;
87
88 for (i = 0; i < cnt; i++) {
89 spin_lock_bh(&sys->spinlock);
90 if (unlikely(list_empty(&sys->head_desc_list))) {
91 spin_unlock_bh(&sys->spinlock);
92 return;
93 }
94 tx_pkt_expected = list_first_entry(&sys->head_desc_list,
95 struct ipa_tx_pkt_wrapper,
96 link);
97 list_del(&tx_pkt_expected->link);
98 sys->len--;
99 spin_unlock_bh(&sys->spinlock);
100 if (!tx_pkt_expected->no_unmap_dma) {
101 if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
102 dma_unmap_single(ipa_ctx->pdev,
103 tx_pkt_expected->mem.phys_base,
104 tx_pkt_expected->mem.size,
105 DMA_TO_DEVICE);
106 } else {
107 dma_unmap_page(ipa_ctx->pdev,
108 tx_pkt_expected->mem.phys_base,
109 tx_pkt_expected->mem.size,
110 DMA_TO_DEVICE);
111 }
112 }
113 if (tx_pkt_expected->callback)
114 tx_pkt_expected->callback(tx_pkt_expected->user1,
115 tx_pkt_expected->user2);
116 if (tx_pkt_expected->cnt > 1 &&
117 tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) {
118 if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) {
119 dma_pool_free(ipa_ctx->dma_pool,
120 tx_pkt_expected->mult.base,
121 tx_pkt_expected->mult.phys_base);
122 } else {
123 dma_unmap_single(ipa_ctx->pdev,
124 tx_pkt_expected->mult.phys_base,
125 tx_pkt_expected->mult.size,
126 DMA_TO_DEVICE);
127 kfree(tx_pkt_expected->mult.base);
128 }
129 }
130 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected);
131 }
132}
133
134static void ipa_wq_write_done_status(int src_pipe)
135{
136 struct ipa_tx_pkt_wrapper *tx_pkt_expected;
137 struct ipa_sys_context *sys;
138 u32 cnt;
139
140 WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes);
141
142 if (!ipa_ctx->ep[src_pipe].status.status_en)
143 return;
144
145 sys = ipa_ctx->ep[src_pipe].sys;
146 if (!sys)
147 return;
148
149 spin_lock_bh(&sys->spinlock);
150 if (unlikely(list_empty(&sys->head_desc_list))) {
151 spin_unlock_bh(&sys->spinlock);
152 return;
153 }
154 tx_pkt_expected = list_first_entry(&sys->head_desc_list,
155 struct ipa_tx_pkt_wrapper,
156 link);
157 cnt = tx_pkt_expected->cnt;
158 spin_unlock_bh(&sys->spinlock);
159 ipa_wq_write_done_common(sys, cnt);
160}
161
162/**
163 * ipa_wq_write_done() - this function will be (eventually) called when a Tx
164 * operation is complete
165 * @work: work_struct used by the work queue
166 *
167 * Will be called in deferred context.
168 * - invoke the callback supplied by the client who sent this command
169 * - iterate over all packets and validate that
170 * the order for sent packet is the same as expected
171 * - delete all the tx packet descriptors from the system
172 * pipe context (not needed anymore)
173 * - return the tx buffer back to dma_pool
174 */
175static void ipa_wq_write_done(struct work_struct *work)
176{
177 struct ipa_tx_pkt_wrapper *tx_pkt;
178 u32 cnt;
179 struct ipa_sys_context *sys;
180
181 tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
182 cnt = tx_pkt->cnt;
183 sys = tx_pkt->sys;
184
185 ipa_wq_write_done_common(sys, cnt);
186}
187
188static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
189 bool in_poll_state)
190{
191 struct sps_iovec iov;
192 int ret;
193 int cnt = 0;
194
195 while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
196 !atomic_read(&sys->curr_polling_state))) {
197 if (cnt && !process_all)
198 break;
199 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
200 if (ret) {
201 IPAERR("sps_get_iovec failed %d\n", ret);
202 break;
203 }
204
205 if (iov.addr == 0)
206 break;
207
208 ipa_wq_write_done_common(sys, 1);
209 cnt++;
210 };
211
212 return cnt;
213}
214
215/**
216 * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
217 */
218static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
219{
220 int ret;
221
222 if (!atomic_read(&sys->curr_polling_state)) {
223 IPAERR("already in intr mode\n");
224 goto fail;
225 }
226
227 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
228 if (ret) {
229 IPAERR("sps_get_config() failed %d\n", ret);
230 goto fail;
231 }
232 sys->event.options = SPS_O_EOT;
233 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
234 if (ret) {
235 IPAERR("sps_register_event() failed %d\n", ret);
236 goto fail;
237 }
238 sys->ep->connect.options =
239 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
240 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
241 if (ret) {
242 IPAERR("sps_set_config() failed %d\n", ret);
243 goto fail;
244 }
245 atomic_set(&sys->curr_polling_state, 0);
246 ipa_handle_tx_core(sys, true, false);
247 return;
248
249fail:
250 queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
251 msecs_to_jiffies(1));
252}
253
254static void ipa_handle_tx(struct ipa_sys_context *sys)
255{
256 int inactive_cycles = 0;
257 int cnt;
258
259 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
260 do {
261 cnt = ipa_handle_tx_core(sys, true, true);
262 if (cnt == 0) {
263 inactive_cycles++;
264 usleep_range(POLLING_MIN_SLEEP_TX,
265 POLLING_MAX_SLEEP_TX);
266 } else {
267 inactive_cycles = 0;
268 }
269 } while (inactive_cycles <= POLLING_INACTIVITY_TX);
270
271 ipa_tx_switch_to_intr_mode(sys);
272 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
273}
274
275static void ipa_wq_handle_tx(struct work_struct *work)
276{
277 struct ipa_sys_context *sys;
278
279 sys = container_of(work, struct ipa_sys_context, work);
280
281 ipa_handle_tx(sys);
282}
283
284/**
285 * ipa_send_one() - Send a single descriptor
286 * @sys: system pipe context
287 * @desc: descriptor to send
288 * @in_atomic: whether caller is in atomic context
289 *
290 * - Allocate tx_packet wrapper
291 * - transfer data to the IPA
292 * - after the transfer was done the SPS will
293 * notify the sending user via ipa_sps_irq_comp_tx()
294 *
295 * Return codes: 0: success, -EFAULT: failure
296 */
297int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
298 bool in_atomic)
299{
300 struct ipa_tx_pkt_wrapper *tx_pkt;
301 int result;
302 u16 sps_flags = SPS_IOVEC_FLAG_EOT;
303 dma_addr_t dma_address;
304 u16 len;
305 u32 mem_flag = GFP_ATOMIC;
306 struct sps_iovec iov;
307 int ret;
308
309 if (unlikely(!in_atomic))
310 mem_flag = GFP_KERNEL;
311
312 tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
313 if (!tx_pkt) {
314 IPAERR("failed to alloc tx wrapper\n");
315 goto fail_mem_alloc;
316 }
317
318 if (!desc->dma_address_valid) {
319 dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
320 desc->len, DMA_TO_DEVICE);
321 } else {
322 dma_address = desc->dma_address;
323 tx_pkt->no_unmap_dma = true;
324 }
325 if (!dma_address) {
326 IPAERR("failed to DMA wrap\n");
327 goto fail_dma_map;
328 }
329
330 INIT_LIST_HEAD(&tx_pkt->link);
331 tx_pkt->type = desc->type;
332 tx_pkt->cnt = 1; /* only 1 desc in this "set" */
333
334 tx_pkt->mem.phys_base = dma_address;
335 tx_pkt->mem.base = desc->pyld;
336 tx_pkt->mem.size = desc->len;
337 tx_pkt->sys = sys;
338 tx_pkt->callback = desc->callback;
339 tx_pkt->user1 = desc->user1;
340 tx_pkt->user2 = desc->user2;
341
342 /*
343 * Special treatment for immediate commands, where the structure of the
344 * descriptor is different
345 */
346 if (desc->type == IPA_IMM_CMD_DESC) {
347 sps_flags |= SPS_IOVEC_FLAG_IMME;
348 len = desc->opcode;
349 IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
350 desc->opcode, desc->len, sps_flags);
351 IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
352 } else {
353 len = desc->len;
354 }
355
356 INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
357
358 spin_lock_bh(&sys->spinlock);
359 list_add_tail(&tx_pkt->link, &sys->head_desc_list);
360 if (sys->policy == IPA_POLICY_NOINTR_MODE) {
361 do {
362 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
363 if (ret) {
364 IPADBG("sps_get_iovec failed %d\n", ret);
365 break;
366 }
367 if ((iov.addr == 0x0) && (iov.size == 0x0))
368 break;
369 } while (1);
370 }
371 result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
372 sps_flags);
373 if (result) {
374 IPAERR("sps_transfer_one failed rc=%d\n", result);
375 goto fail_sps_send;
376 }
377
378 spin_unlock_bh(&sys->spinlock);
379
380 return 0;
381
382fail_sps_send:
383 list_del(&tx_pkt->link);
384 spin_unlock_bh(&sys->spinlock);
385 dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
386fail_dma_map:
387 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
388fail_mem_alloc:
389 return -EFAULT;
390}
391
392/**
393 * ipa_send() - Send multiple descriptors in one HW transaction
394 * @sys: system pipe context
395 * @num_desc: number of packets
396 * @desc: packets to send (may be immediate command or data)
397 * @in_atomic: whether caller is in atomic context
398 *
399 * This function is used for system-to-bam connection.
400 * - SPS driver expects struct sps_transfer which will contain all the data
401 * for a transaction
402 * - ipa_tx_pkt_wrapper will be used for each ipa
403 * descriptor (allocated from wrappers cache)
404 * - The wrapper struct will be configured for each ipa-desc payload and will
405 * contain information which will be later used by the user callbacks
406 * - each transfer will be made by calling sps_transfer()
407 * - Each packet (command or data) that will be sent will also be saved in
408 * ipa_sys_context for later check that all data was sent
409 *
410 * Return codes: 0: success, -EFAULT: failure
411 */
412int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
413 bool in_atomic)
414{
415 struct ipa_tx_pkt_wrapper *tx_pkt;
416 struct ipa_tx_pkt_wrapper *next_pkt;
417 struct sps_transfer transfer = { 0 };
418 struct sps_iovec *iovec;
419 dma_addr_t dma_addr;
420 int i = 0;
421 int j;
422 int result;
423 int fail_dma_wrap = 0;
424 uint size = num_desc * sizeof(struct sps_iovec);
425 u32 mem_flag = GFP_ATOMIC;
426 struct sps_iovec iov;
427 int ret;
428
429 if (unlikely(!in_atomic))
430 mem_flag = GFP_KERNEL;
431
432 if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
433 transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
434 &dma_addr);
435 if (!transfer.iovec) {
436 IPAERR("fail to alloc dma mem for sps xfr buff\n");
437 return -EFAULT;
438 }
439 } else {
440 transfer.iovec = kmalloc(size, mem_flag);
441 if (!transfer.iovec) {
442 IPAERR("fail to alloc mem for sps xfr buff ");
443 IPAERR("num_desc = %d size = %d\n", num_desc, size);
444 return -EFAULT;
445 }
446 dma_addr = dma_map_single(ipa_ctx->pdev,
447 transfer.iovec, size, DMA_TO_DEVICE);
448 if (!dma_addr) {
449 IPAERR("dma_map_single failed for sps xfr buff\n");
450 kfree(transfer.iovec);
451 return -EFAULT;
452 }
453 }
454
455 transfer.iovec_phys = dma_addr;
456 transfer.iovec_count = num_desc;
457 spin_lock_bh(&sys->spinlock);
458
459 for (i = 0; i < num_desc; i++) {
460 fail_dma_wrap = 0;
461 tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
462 mem_flag);
463 if (!tx_pkt) {
464 IPAERR("failed to alloc tx wrapper\n");
465 goto failure;
466 }
467 /*
468 * first desc of set is "special" as it holds the count and
469 * other info
470 */
471 if (i == 0) {
472 transfer.user = tx_pkt;
473 tx_pkt->mult.phys_base = dma_addr;
474 tx_pkt->mult.base = transfer.iovec;
475 tx_pkt->mult.size = size;
476 tx_pkt->cnt = num_desc;
477 INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
478 }
479
480 iovec = &transfer.iovec[i];
481 iovec->flags = 0;
482
483 INIT_LIST_HEAD(&tx_pkt->link);
484 tx_pkt->type = desc[i].type;
485
486 if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
487 tx_pkt->mem.base = desc[i].pyld;
488 tx_pkt->mem.size = desc[i].len;
489
490 if (!desc[i].dma_address_valid) {
491 tx_pkt->mem.phys_base =
492 dma_map_single(ipa_ctx->pdev,
493 tx_pkt->mem.base,
494 tx_pkt->mem.size,
495 DMA_TO_DEVICE);
496 } else {
497 tx_pkt->mem.phys_base = desc[i].dma_address;
498 tx_pkt->no_unmap_dma = true;
499 }
500 } else {
501 tx_pkt->mem.base = desc[i].frag;
502 tx_pkt->mem.size = desc[i].len;
503
504 if (!desc[i].dma_address_valid) {
505 tx_pkt->mem.phys_base =
506 skb_frag_dma_map(ipa_ctx->pdev,
507 desc[i].frag,
508 0, tx_pkt->mem.size,
509 DMA_TO_DEVICE);
510 } else {
511 tx_pkt->mem.phys_base = desc[i].dma_address;
512 tx_pkt->no_unmap_dma = true;
513 }
514 }
515
516 if (!tx_pkt->mem.phys_base) {
517 IPAERR("failed to alloc tx wrapper\n");
518 fail_dma_wrap = 1;
519 goto failure;
520 }
521
522 tx_pkt->sys = sys;
523 tx_pkt->callback = desc[i].callback;
524 tx_pkt->user1 = desc[i].user1;
525 tx_pkt->user2 = desc[i].user2;
526
527 /*
528 * Point the iovec to the buffer and
529 * add this packet to system pipe context.
530 */
531 iovec->addr = tx_pkt->mem.phys_base;
532 list_add_tail(&tx_pkt->link, &sys->head_desc_list);
533
534 /*
535 * Special treatment for immediate commands, where the structure
536 * of the descriptor is different
537 */
538 if (desc[i].type == IPA_IMM_CMD_DESC) {
539 iovec->size = desc[i].opcode;
540 iovec->flags |= SPS_IOVEC_FLAG_IMME;
541 IPA_DUMP_BUFF(desc[i].pyld,
542 tx_pkt->mem.phys_base, desc[i].len);
543 } else {
544 iovec->size = desc[i].len;
545 }
546
547 if (i == (num_desc - 1)) {
548 iovec->flags |= SPS_IOVEC_FLAG_EOT;
549 /* "mark" the last desc */
550 tx_pkt->cnt = IPA_LAST_DESC_CNT;
551 }
552 }
553
554 if (sys->policy == IPA_POLICY_NOINTR_MODE) {
555 do {
556 ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
557 if (ret) {
558 IPADBG("sps_get_iovec failed %d\n", ret);
559 break;
560 }
561 if ((iov.addr == 0x0) && (iov.size == 0x0))
562 break;
563 } while (1);
564 }
565 result = sps_transfer(sys->ep->ep_hdl, &transfer);
566 if (result) {
567 IPAERR("sps_transfer failed rc=%d\n", result);
568 goto failure;
569 }
570
571 spin_unlock_bh(&sys->spinlock);
572 return 0;
573
574failure:
575 tx_pkt = transfer.user;
576 for (j = 0; j < i; j++) {
577 next_pkt = list_next_entry(tx_pkt, link);
578 list_del(&tx_pkt->link);
579 if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
580 dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
581 tx_pkt->mem.size,
582 DMA_TO_DEVICE);
583 } else {
584 dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
585 tx_pkt->mem.size,
586 DMA_TO_DEVICE);
587 }
588 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
589 tx_pkt = next_pkt;
590 }
591 if (j < num_desc)
592 /* last desc failed */
593 if (fail_dma_wrap)
594 kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
595 if (transfer.iovec_phys) {
596 if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
597 dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
598 transfer.iovec_phys);
599 } else {
600 dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys,
601 size, DMA_TO_DEVICE);
602 kfree(transfer.iovec);
603 }
604 }
605 spin_unlock_bh(&sys->spinlock);
606 return -EFAULT;
607}
608
609/**
610 * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver
611 * after an immediate command is complete.
612 * @user1: pointer to the descriptor of the transfer
613 * @user2:
614 *
615 * Complete the immediate commands completion object, this will release the
616 * thread which waits on this completion object (ipa_send_cmd())
617 */
618static void ipa_sps_irq_cmd_ack(void *user1, int user2)
619{
620 struct ipa_desc *desc = (struct ipa_desc *)user1;
621
622 if (!desc) {
623 IPAERR("desc is NULL\n");
624 WARN_ON(1);
625 return;
626 }
627 IPADBG("got ack for cmd=%d\n", desc->opcode);
628 complete(&desc->xfer_done);
629}
630
631/**
632 * ipa_send_cmd - send immediate commands
633 * @num_desc: number of descriptors within the desc struct
634 * @descr: descriptor structure
635 *
636 * Function will block till the command gets an ACK from the IPA HW; the caller
637 * needs to free any resources it allocated after the function returns.
638 * The callback in ipa_desc should not be set by the caller
639 * for this function.
640 */
641int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
642{
643 struct ipa_desc *desc;
644 int result = 0;
645 struct ipa_sys_context *sys;
646 int ep_idx;
647
648 IPADBG("sending command\n");
649
650 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
651 if (-1 == ep_idx) {
652 IPAERR("Client %u is not mapped\n",
653 IPA_CLIENT_APPS_CMD_PROD);
654 return -EFAULT;
655 }
656 sys = ipa_ctx->ep[ep_idx].sys;
657
658 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
659
660 if (num_desc == 1) {
661 init_completion(&descr->xfer_done);
662
663 if (descr->callback || descr->user1)
664 WARN_ON(1);
665
666 descr->callback = ipa_sps_irq_cmd_ack;
667 descr->user1 = descr;
668 if (ipa_send_one(sys, descr, true)) {
669 IPAERR("fail to send immediate command\n");
670 result = -EFAULT;
671 goto bail;
672 }
673 wait_for_completion(&descr->xfer_done);
674 } else {
675 desc = &descr[num_desc - 1];
676 init_completion(&desc->xfer_done);
677
678 if (desc->callback || desc->user1)
679 WARN_ON(1);
680
681 desc->callback = ipa_sps_irq_cmd_ack;
682 desc->user1 = desc;
683 if (ipa_send(sys, num_desc, descr, true)) {
684 IPAERR("fail to send multiple immediate command set\n");
685 result = -EFAULT;
686 goto bail;
687 }
688 wait_for_completion(&desc->xfer_done);
689 }
690
691bail:
692 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
693 return result;
694}
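/*
 * Illustrative caller sketch for ipa_send_cmd() (the cmd_pyld and rc names
 * below are hypothetical, shown only to restate the contract documented
 * above): fill an IPA_IMM_CMD_DESC descriptor, leave callback/user1 unset,
 * and free the payload only after the blocking call returns.
 *
 *	struct ipa_desc desc;
 *	int rc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.opcode = IPA_IP_PACKET_INIT;
 *	desc.pyld = cmd_pyld;
 *	desc.len = sizeof(*cmd_pyld);
 *	desc.type = IPA_IMM_CMD_DESC;
 *	rc = ipa_send_cmd(1, &desc);
 *	kfree(cmd_pyld);
 */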
695
696/**
697 * ipa_sps_irq_tx_notify() - Callback function which will be called by
698 * the SPS driver to start a Tx poll operation.
699 * Called in an interrupt context.
700 * @notify: SPS driver supplied notification struct
701 *
702 * This function defers the work for this event to the tx workqueue.
703 */
704static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
705{
706 struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
707 int ret;
708
709 IPADBG("event %d notified\n", notify->event_id);
710
711 switch (notify->event_id) {
712 case SPS_EVENT_EOT:
713 if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
714 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
715 if (!atomic_read(&sys->curr_polling_state)) {
716 ret = sps_get_config(sys->ep->ep_hdl,
717 &sys->ep->connect);
718 if (ret) {
719 IPAERR("sps_get_config() failed %d\n", ret);
720 break;
721 }
722 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
723 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
724 ret = sps_set_config(sys->ep->ep_hdl,
725 &sys->ep->connect);
726 if (ret) {
727 IPAERR("sps_set_config() failed %d\n", ret);
728 break;
729 }
730 atomic_set(&sys->curr_polling_state, 1);
731 queue_work(sys->wq, &sys->work);
732 }
733 break;
734 default:
735 IPAERR("received unexpected event id %d\n", notify->event_id);
736 }
737}
738
739/**
740 * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
741 * the SPS driver after a Tx operation is complete.
742 * Called in an interrupt context.
743 * @notify: SPS driver supplied notification struct
744 *
745 * This function defers the work for this event to the tx workqueue.
746 * This event will later be handled by ipa_wq_write_done().
747 */
748static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
749{
750 struct ipa_tx_pkt_wrapper *tx_pkt;
751
752 IPADBG("event %d notified\n", notify->event_id);
753
754 switch (notify->event_id) {
755 case SPS_EVENT_EOT:
756 tx_pkt = notify->data.transfer.user;
757 if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
758 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
759 queue_work(tx_pkt->sys->wq, &tx_pkt->work);
760 break;
761 default:
762 IPAERR("received unexpected event id %d\n", notify->event_id);
763 }
764}
765
766/**
767 * ipa_poll_pkt() - Poll packet from SPS BAM
768 * Return 0 to the caller on a successful poll,
769 * -EIO otherwise.
770 *
771 */
772static int ipa_poll_pkt(struct ipa_sys_context *sys,
773 struct sps_iovec *iov)
774{
775 int ret;
776
777 ret = sps_get_iovec(sys->ep->ep_hdl, iov);
778 if (ret) {
779 IPAERR("sps_get_iovec failed %d\n", ret);
780 return ret;
781 }
782
783 if (iov->addr == 0)
784 return -EIO;
785
786 return 0;
787}
788
789/**
790 * ipa_handle_rx_core() - The core functionality of packet reception. This
791 * function is called from multiple code paths.
792 *
793 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
794 * endpoint. The function runs as long as there are packets in the pipe.
795 * For each packet:
796 * - Disconnect the packet from the system pipe linked list
797 * - Unmap the packet's skb, making it non-DMA-able
798 * - Free the packet from the cache
799 * - Prepare a proper skb
800 * - Call the endpoints notify function, passing the skb in the parameters
801 * - Replenish the rx cache
802 */
803static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
804 bool in_poll_state)
805{
806 struct sps_iovec iov;
807 int ret;
808 int cnt = 0;
809
810 while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
811 !atomic_read(&sys->curr_polling_state))) {
812 if (cnt && !process_all)
813 break;
814
815 ret = ipa_poll_pkt(sys, &iov);
816 if (ret)
817 break;
818
819 if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
820 ipa_dma_memcpy_notify(sys, &iov);
821 else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
822 ipa_wlan_wq_rx_common(sys, iov.size);
823 else
824 ipa_wq_rx_common(sys, iov.size);
825
826 cnt++;
827 };
828
829 return cnt;
830}
831
832/**
833 * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
834 */
835static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
836{
837 int ret;
838
839 if (!sys->ep || !sys->ep->valid) {
840 IPAERR("EP Not Valid, no need to cleanup.\n");
841 return;
842 }
843
844 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
845 if (ret) {
846 IPAERR("sps_get_config() failed %d\n", ret);
847 goto fail;
848 }
849
850 if (!atomic_read(&sys->curr_polling_state) &&
851 ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
852 IPADBG("already in intr mode\n");
853 return;
854 }
855
856 if (!atomic_read(&sys->curr_polling_state)) {
857 IPAERR("already in intr mode\n");
858 goto fail;
859 }
860
861 sys->event.options = SPS_O_EOT;
862 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
863 if (ret) {
864 IPAERR("sps_register_event() failed %d\n", ret);
865 goto fail;
866 }
867 sys->ep->connect.options =
868 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
869 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
870 if (ret) {
871 IPAERR("sps_set_config() failed %d\n", ret);
872 goto fail;
873 }
874 atomic_set(&sys->curr_polling_state, 0);
875 if (!sys->ep->napi_enabled)
876 ipa_handle_rx_core(sys, true, false);
877 ipa_dec_release_wakelock(sys->ep->wakelock_client);
878 return;
879
880fail:
881 queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
882 msecs_to_jiffies(1));
883}
884
885
886/**
887 * ipa_sps_irq_control() - Function to enable or disable BAM IRQ.
888 */
889static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
890{
891 int ret;
892
893 /*
894 * Do not change sps config in case we are in polling mode as this
895 * indicates that sps driver already notified EOT event and sps config
896 * should not change until ipa driver processes the packet.
897 */
898 if (atomic_read(&sys->curr_polling_state)) {
899 IPADBG("in polling mode, do not change config\n");
900 return;
901 }
902
903 if (enable) {
904 ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
905 if (ret) {
906 IPAERR("sps_get_config() failed %d\n", ret);
907 return;
908 }
909 sys->event.options = SPS_O_EOT;
910 ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
911 if (ret) {
912 IPAERR("sps_register_event() failed %d\n", ret);
913 return;
914 }
915 sys->ep->connect.options =
916 SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
917 ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
918 if (ret) {
919 IPAERR("sps_set_config() failed %d\n", ret);
920 return;
921 }
922 } else {
923 ret = sps_get_config(sys->ep->ep_hdl,
924 &sys->ep->connect);
925 if (ret) {
926 IPAERR("sps_get_config() failed %d\n", ret);
927 return;
928 }
929 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
930 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
931 ret = sps_set_config(sys->ep->ep_hdl,
932 &sys->ep->connect);
933 if (ret) {
934 IPAERR("sps_set_config() failed %d\n", ret);
935 return;
936 }
937 }
938}
939
940void ipa_sps_irq_control_all(bool enable)
941{
942 struct ipa_ep_context *ep;
943 int ipa_ep_idx, client_num;
944
945 IPADBG("\n");
946
947 for (client_num = IPA_CLIENT_CONS;
948 client_num < IPA_CLIENT_MAX; client_num++) {
949 if (!IPA_CLIENT_IS_APPS_CONS(client_num))
950 continue;
951
952 ipa_ep_idx = ipa_get_ep_mapping(client_num);
953 if (ipa_ep_idx == -1) {
954 IPAERR("Invalid client.\n");
955 continue;
956 }
957 ep = &ipa_ctx->ep[ipa_ep_idx];
958 if (!ep->valid) {
959 IPAERR("EP (%d) not allocated.\n", ipa_ep_idx);
960 continue;
961 }
962 ipa_sps_irq_control(ep->sys, enable);
963 }
964}
965
966/**
967 * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
968 * driver when a packet is received
969 * @notify: SPS driver supplied notification information
970 *
971 * Called in an interrupt context, therefore the majority of the work is
972 * deferred using a work queue.
973 *
974 * After receiving a packet, the driver goes to polling mode and keeps pulling
975 * packets until the rx buffer is empty, then it goes back to interrupt mode.
976 * This prevents the CPU from handling too many interrupts when the
977 * throughput is high.
978 */
979static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
980{
981 struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
982 int ret;
983
984 IPADBG("event %d notified\n", notify->event_id);
985
986 switch (notify->event_id) {
987 case SPS_EVENT_EOT:
988 if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
989 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
990
991 if (atomic_read(&sys->curr_polling_state)) {
992 sys->ep->eot_in_poll_err++;
993 break;
994 }
995
996 ret = sps_get_config(sys->ep->ep_hdl,
997 &sys->ep->connect);
998 if (ret) {
999 IPAERR("sps_get_config() failed %d\n", ret);
1000 break;
1001 }
1002 sys->ep->connect.options = SPS_O_AUTO_ENABLE |
1003 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
1004 ret = sps_set_config(sys->ep->ep_hdl,
1005 &sys->ep->connect);
1006 if (ret) {
1007 IPAERR("sps_set_config() failed %d\n", ret);
1008 break;
1009 }
1010 ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
1011 atomic_set(&sys->curr_polling_state, 1);
1012 trace_intr_to_poll(sys->ep->client);
1013 queue_work(sys->wq, &sys->work);
1014 break;
1015 default:
1016 IPAERR("received unexpected event id %d\n", notify->event_id);
1017 }
1018}
1019
1020static void switch_to_intr_tx_work_func(struct work_struct *work)
1021{
1022 struct delayed_work *dwork;
1023 struct ipa_sys_context *sys;
1024
1025 dwork = container_of(work, struct delayed_work, work);
1026 sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
1027 ipa_handle_tx(sys);
1028}
1029
1030/**
1031 * ipa_handle_rx() - handle packet reception. This function is executed in the
1032 * context of a work queue.
1033 * @work: work struct needed by the work queue
1034 *
1035 * ipa_handle_rx_core() is run in polling mode. After all packets have been
1036 * received, the driver switches back to interrupt mode.
1037 */
1038static void ipa_handle_rx(struct ipa_sys_context *sys)
1039{
1040 int inactive_cycles = 0;
1041 int cnt;
1042
1043 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1044 do {
1045 cnt = ipa_handle_rx_core(sys, true, true);
1046 if (cnt == 0) {
1047 inactive_cycles++;
1048 trace_idle_sleep_enter(sys->ep->client);
1049 usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
1050 ipa_ctx->ipa_rx_max_timeout_usec);
1051 trace_idle_sleep_exit(sys->ep->client);
1052 } else {
1053 inactive_cycles = 0;
1054 }
1055
1056 /* if pipe is out of buffers there is no point polling for
1057 * completed descs; release the worker so delayed work can
1058 * run in a timely manner
1059 */
1060 if (sys->len == 0)
1061 break;
1062
1063 } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);
1064
1065 trace_poll_to_intr(sys->ep->client);
1066 ipa_rx_switch_to_intr_mode(sys);
1067 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1068}
1069
1070/**
1071 * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
1072 * function is executed in the softirq context
1073 *
1074 * if input budget is zero, the driver switches back to
1075 * interrupt mode
1076 *
1077 * return number of polled packets, on error 0(zero)
1078 */
1079int ipa2_rx_poll(u32 clnt_hdl, int weight)
1080{
1081 struct ipa_ep_context *ep;
1082 int ret;
1083 int cnt = 0;
1084 unsigned int delay = 1;
1085 struct sps_iovec iov;
1086
1087 IPADBG("\n");
1088 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
1089 ipa_ctx->ep[clnt_hdl].valid == 0) {
1090 IPAERR("bad parm 0x%x\n", clnt_hdl);
1091 return cnt;
1092 }
1093
1094 ep = &ipa_ctx->ep[clnt_hdl];
1095 while (cnt < weight &&
1096 atomic_read(&ep->sys->curr_polling_state)) {
1097
1098 ret = ipa_poll_pkt(ep->sys, &iov);
1099 if (ret)
1100 break;
1101
1102 ipa_wq_rx_common(ep->sys, iov.size);
1103 cnt += IPA_WAN_AGGR_PKT_CNT;
1104 };
1105
1106 if (cnt == 0 || cnt < weight) {
1107 ep->inactive_cycles++;
1108 ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
1109
1110 if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
1111 ep->switch_to_intr = true;
1112 delay = 0;
1113 } else if (cnt < weight) {
1114 delay = 0;
1115 }
1116 queue_delayed_work(ep->sys->wq,
1117 &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
1118 } else
1119 ep->inactive_cycles = 0;
1120
1121 return cnt;
1122}
1123
1124static void switch_to_intr_rx_work_func(struct work_struct *work)
1125{
1126 struct delayed_work *dwork;
1127 struct ipa_sys_context *sys;
1128
1129 dwork = container_of(work, struct delayed_work, work);
1130 sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
1131
1132 if (sys->ep->napi_enabled) {
1133 if (sys->ep->switch_to_intr) {
1134 ipa_rx_switch_to_intr_mode(sys);
1135 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
1136 sys->ep->switch_to_intr = false;
1137 sys->ep->inactive_cycles = 0;
1138 } else
1139 sys->ep->client_notify(sys->ep->priv,
1140 IPA_CLIENT_START_POLL, 0);
1141 } else
1142 ipa_handle_rx(sys);
1143}
1144
1145/**
1146 * ipa_update_repl_threshold() - Update the repl_threshold for the client.
1147 *
1148 * Return value: None.
1149 */
1150void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
1151{
1152 int ep_idx;
1153 struct ipa_ep_context *ep;
1154
1155 /* Check if ep is valid. */
1156 ep_idx = ipa2_get_ep_mapping(ipa_client);
1157 if (ep_idx == -1) {
1158 IPADBG("Invalid IPA client\n");
1159 return;
1160 }
1161
1162 ep = &ipa_ctx->ep[ep_idx];
1163 if (!ep->valid) {
1164 IPADBG("EP not valid/Not applicable for client.\n");
1165 return;
1166 }
1167 /*
1168 * Determine how many buffers/descriptors remaining will
1169 * cause to drop below the yellow WM bar.
1170 */
1171 if (ep->sys->rx_buff_sz)
1172 ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
1173 / ep->sys->rx_buff_sz;
1174 else
1175 ep->rx_replenish_threshold = 0;
1176}
1177
1178/**
1179 * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
1180 * IPA EP configuration
1181 * @sys_in: [in] input needed to setup BAM pipe and configure EP
1182 * @clnt_hdl: [out] client handle
1183 *
1184 * - configure the end-point registers with the supplied
1185 * parameters from the user.
1186 * - call SPS APIs to create a system-to-bam connection with IPA.
1187 * - allocate descriptor FIFO
1188 * - register callback function (ipa_sps_irq_rx_notify or
1189 * ipa_sps_irq_tx_notify - depends on client type) in case the driver is
1190 * not configured to polling mode
1191 *
1192 * Returns: 0 on success, negative on failure
1193 */
1194int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
1195{
1196 struct ipa_ep_context *ep;
1197 int ipa_ep_idx;
1198 int result = -EINVAL;
1199 dma_addr_t dma_addr;
1200 char buff[IPA_RESOURCE_NAME_MAX];
1201 struct iommu_domain *smmu_domain;
1202
1203 if (unlikely(!ipa_ctx)) {
1204 IPAERR("IPA driver was not initialized\n");
1205 return -EINVAL;
1206 }
1207
1208 if (sys_in == NULL || clnt_hdl == NULL) {
1209 IPAERR("NULL args\n");
1210 goto fail_gen;
1211 }
1212
1213 if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
1214 IPAERR("bad parm client:%d fifo_sz:%d\n",
1215 sys_in->client, sys_in->desc_fifo_sz);
1216 goto fail_gen;
1217 }
1218
1219 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
1220 if (ipa_ep_idx == -1) {
1221 IPAERR("Invalid client.\n");
1222 goto fail_gen;
1223 }
1224
1225 ep = &ipa_ctx->ep[ipa_ep_idx];
1226
1227 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
1228
1229 if (ep->valid == 1) {
1230 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
1231 IPAERR("EP already allocated.\n");
1232 goto fail_and_disable_clocks;
1233 } else {
1234 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
1235 &sys_in->ipa_ep_cfg.hdr)) {
1236 IPAERR("fail to configure hdr prop of EP.\n");
1237 result = -EFAULT;
1238 goto fail_and_disable_clocks;
1239 }
1240 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
1241 &sys_in->ipa_ep_cfg.cfg)) {
1242 IPAERR("fail to configure cfg prop of EP.\n");
1243 result = -EFAULT;
1244 goto fail_and_disable_clocks;
1245 }
1246 IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
1247 sys_in->client, ipa_ep_idx, ep->sys);
1248 ep->client_notify = sys_in->notify;
1249 ep->priv = sys_in->priv;
1250 *clnt_hdl = ipa_ep_idx;
1251 if (!ep->keep_ipa_awake)
1252 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1253
1254 return 0;
1255 }
1256 }
1257
1258 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
1259
1260 if (!ep->sys) {
1261 ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL);
1262 if (!ep->sys) {
1263 IPAERR("failed to sys ctx for client %d\n",
1264 sys_in->client);
1265 result = -ENOMEM;
1266 goto fail_and_disable_clocks;
1267 }
1268
1269 ep->sys->ep = ep;
1270 snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
1271 sys_in->client);
1272 ep->sys->wq = alloc_workqueue(buff,
1273 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
1274 if (!ep->sys->wq) {
1275 IPAERR("failed to create wq for client %d\n",
1276 sys_in->client);
1277 result = -EFAULT;
1278 goto fail_wq;
1279 }
1280
1281 snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
1282 sys_in->client);
1283 ep->sys->repl_wq = alloc_workqueue(buff,
1284 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
1285 if (!ep->sys->repl_wq) {
1286 IPAERR("failed to create rep wq for client %d\n",
1287 sys_in->client);
1288 result = -EFAULT;
1289 goto fail_wq2;
1290 }
1291
1292 INIT_LIST_HEAD(&ep->sys->head_desc_list);
1293 INIT_LIST_HEAD(&ep->sys->rcycl_list);
1294 spin_lock_init(&ep->sys->spinlock);
1295 } else {
1296 memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
1297 }
1298
1299 ep->skip_ep_cfg = sys_in->skip_ep_cfg;
1300 if (ipa_assign_policy(sys_in, ep->sys)) {
1301 IPAERR("failed to sys ctx for client %d\n", sys_in->client);
1302 result = -ENOMEM;
1303 goto fail_gen2;
1304 }
1305
1306 ep->valid = 1;
1307 ep->client = sys_in->client;
1308 ep->client_notify = sys_in->notify;
1309 ep->napi_enabled = sys_in->napi_enabled;
1310 ep->priv = sys_in->priv;
1311 ep->keep_ipa_awake = sys_in->keep_ipa_awake;
1312 atomic_set(&ep->avail_fifo_desc,
1313 ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
1314
1315 if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
1316 ep->sys->status_stat == NULL) {
1317 ep->sys->status_stat =
1318 kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL);
1319 if (!ep->sys->status_stat) {
1320 IPAERR("no memory\n");
1321 goto fail_gen2;
1322 }
1323 }
1324
1325 result = ipa_enable_data_path(ipa_ep_idx);
1326 if (result) {
1327 IPAERR("enable data path failed res=%d clnt=%d.\n", result,
1328 ipa_ep_idx);
1329 goto fail_gen2;
1330 }
1331
1332 if (!ep->skip_ep_cfg) {
1333 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
1334 IPAERR("fail to configure EP.\n");
1335 goto fail_gen2;
1336 }
1337 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
1338 IPAERR("fail to configure status of EP.\n");
1339 goto fail_gen2;
1340 }
1341 IPADBG("ep configuration successful\n");
1342 } else {
1343 IPADBG("skipping ep configuration\n");
1344 }
1345
1346 /* Default Config */
1347 ep->ep_hdl = sps_alloc_endpoint();
1348 if (ep->ep_hdl == NULL) {
1349 IPAERR("SPS EP allocation failed.\n");
1350 goto fail_gen2;
1351 }
1352
1353 result = sps_get_config(ep->ep_hdl, &ep->connect);
1354 if (result) {
1355 IPAERR("fail to get config.\n");
1356 goto fail_sps_cfg;
1357 }
1358
1359 /* Specific Config */
1360 if (IPA_CLIENT_IS_CONS(sys_in->client)) {
1361 ep->connect.mode = SPS_MODE_SRC;
1362 ep->connect.destination = SPS_DEV_HANDLE_MEM;
1363 ep->connect.source = ipa_ctx->bam_handle;
1364 ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++;
1365 ep->connect.src_pipe_index = ipa_ep_idx;
1366 /*
1367 * Determine how many buffers/descriptors remaining will
1368 * cause to drop below the yellow WM bar.
1369 */
1370 if (ep->sys->rx_buff_sz)
1371 ep->rx_replenish_threshold =
1372 ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz;
1373 else
1374 ep->rx_replenish_threshold = 0;
1375 /* Only when the WAN pipes are setup, actual threshold will
1376 * be read from the register. So update LAN_CONS ep again with
1377 * right value.
1378 */
1379 if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS)
1380 ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS);
1381 } else {
1382 ep->connect.mode = SPS_MODE_DEST;
1383 ep->connect.source = SPS_DEV_HANDLE_MEM;
1384 ep->connect.destination = ipa_ctx->bam_handle;
1385 ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++;
1386 ep->connect.dest_pipe_index = ipa_ep_idx;
1387 }
1388
1389 IPADBG("client:%d ep:%d",
1390 sys_in->client, ipa_ep_idx);
1391
1392 IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
1393 ep->connect.dest_pipe_index,
1394 ep->connect.src_pipe_index);
1395
1396 ep->connect.options = ep->sys->sps_option;
1397 ep->connect.desc.size = sys_in->desc_fifo_sz;
1398 ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
1399 ep->connect.desc.size, &dma_addr, GFP_KERNEL);
1400 if (ipa_ctx->smmu_s1_bypass) {
1401 ep->connect.desc.phys_base = dma_addr;
1402 } else {
1403 ep->connect.desc.iova = dma_addr;
1404 smmu_domain = ipa2_get_smmu_domain();
1405 if (smmu_domain != NULL) {
1406 ep->connect.desc.phys_base =
1407 iommu_iova_to_phys(smmu_domain, dma_addr);
1408 }
1409 }
1410 if (ep->connect.desc.base == NULL) {
1411 IPAERR("fail to get DMA desc memory.\n");
1412 goto fail_sps_cfg;
1413 }
1414
1415 ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
1416
1417 result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client);
1418 if (result) {
1419 IPAERR("sps_connect fails.\n");
1420 goto fail_sps_connect;
1421 }
1422
1423 ep->sys->event.options = SPS_O_EOT;
1424 ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
1425 ep->sys->event.xfer_done = NULL;
1426 ep->sys->event.user = ep->sys;
1427 ep->sys->event.callback = ep->sys->sps_callback;
1428 result = sps_register_event(ep->ep_hdl, &ep->sys->event);
1429 if (result < 0) {
1430 IPAERR("register event error %d\n", result);
1431 goto fail_register_event;
1432 }
1433
1434 *clnt_hdl = ipa_ep_idx;
1435
1436 if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
1437 ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
1438 ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
1439 sizeof(void *), GFP_KERNEL);
1440 if (!ep->sys->repl.cache) {
1441 IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
1442 ep->sys->repl_hdlr = ipa_replenish_rx_cache;
1443 ep->sys->repl.capacity = 0;
1444 } else {
1445 atomic_set(&ep->sys->repl.head_idx, 0);
1446 atomic_set(&ep->sys->repl.tail_idx, 0);
1447 ipa_wq_repl_rx(&ep->sys->repl_work);
1448 }
1449 }
1450
1451 if (IPA_CLIENT_IS_CONS(sys_in->client))
1452 ipa_replenish_rx_cache(ep->sys);
1453
1454 if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
1455 ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
1456 atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
1457 }
1458
1459 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
1460 if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
1461 if (ipa_ctx->modem_cfg_emb_pipe_flt &&
1462 sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
1463 IPADBG("modem cfg emb pipe flt\n");
1464 else
1465 ipa_install_dflt_flt_rules(ipa_ep_idx);
1466 }
1467
1468 if (!ep->keep_ipa_awake)
1469 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1470
1471 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
1472 ipa_ep_idx, ep->sys);
1473
1474 return 0;
1475
1476fail_register_event:
1477 sps_disconnect(ep->ep_hdl);
1478fail_sps_connect:
1479 dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
1480 ep->connect.desc.base,
1481 ep->connect.desc.phys_base);
1482fail_sps_cfg:
1483 sps_free_endpoint(ep->ep_hdl);
1484fail_gen2:
1485 destroy_workqueue(ep->sys->repl_wq);
1486fail_wq2:
1487 destroy_workqueue(ep->sys->wq);
1488fail_wq:
1489 kfree(ep->sys);
1490 memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
1491fail_and_disable_clocks:
1492 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
1493fail_gen:
1494 return result;
1495}
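/*
 * Illustrative setup sketch (the client type, FIFO size and the
 * my_rx_notify_cb/my_ctx names are examples only): a consumer pipe is
 * created by filling struct ipa_sys_connect_params and keeping the returned
 * handle for the matching ipa2_teardown_sys_pipe() call.
 *
 *	struct ipa_sys_connect_params sys_in;
 *	u32 hdl;
 *
 *	memset(&sys_in, 0, sizeof(sys_in));
 *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
 *	sys_in.desc_fifo_sz = 0x800;
 *	sys_in.notify = my_rx_notify_cb;
 *	sys_in.priv = my_ctx;
 *	if (ipa2_setup_sys_pipe(&sys_in, &hdl))
 *		return -EFAULT;
 *	...
 *	ipa2_teardown_sys_pipe(hdl);
 */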
1496
1497/**
1498 * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
1499 * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe
1500 *
1501 * Returns: 0 on success, negative on failure
1502 */
1503int ipa2_teardown_sys_pipe(u32 clnt_hdl)
1504{
1505 struct ipa_ep_context *ep;
1506 int empty;
1507
1508 if (unlikely(!ipa_ctx)) {
1509 IPAERR("IPA driver was not initialized\n");
1510 return -EINVAL;
1511 }
1512
1513 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
1514 ipa_ctx->ep[clnt_hdl].valid == 0) {
1515 IPAERR("bad parm.\n");
1516 return -EINVAL;
1517 }
1518
1519 ep = &ipa_ctx->ep[clnt_hdl];
1520
1521 if (!ep->keep_ipa_awake)
1522 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
1523
1524 ipa_disable_data_path(clnt_hdl);
1525 if (ep->napi_enabled) {
1526 ep->switch_to_intr = true;
1527 do {
1528 usleep_range(95, 105);
1529 } while (atomic_read(&ep->sys->curr_polling_state));
1530 }
1531
1532 if (IPA_CLIENT_IS_PROD(ep->client)) {
1533 do {
1534 spin_lock_bh(&ep->sys->spinlock);
1535 empty = list_empty(&ep->sys->head_desc_list);
1536 spin_unlock_bh(&ep->sys->spinlock);
1537 if (!empty)
1538 usleep_range(95, 105);
1539 else
1540 break;
1541 } while (1);
1542 }
1543
1544 if (IPA_CLIENT_IS_CONS(ep->client)) {
1545 cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
1546 cancel_delayed_work_sync(&ep->sys->switch_to_intr_work);
1547 }
1548
1549 flush_workqueue(ep->sys->wq);
1550 sps_disconnect(ep->ep_hdl);
1551 dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
1552 ep->connect.desc.base,
1553 ep->connect.desc.phys_base);
1554 sps_free_endpoint(ep->ep_hdl);
1555 if (ep->sys->repl_wq)
1556 flush_workqueue(ep->sys->repl_wq);
1557 if (IPA_CLIENT_IS_CONS(ep->client))
1558 ipa_cleanup_rx(ep->sys);
1559
1560 if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
1561 if (ipa_ctx->modem_cfg_emb_pipe_flt &&
1562 ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
1563 IPADBG("modem cfg emb pipe flt\n");
1564 else
1565 ipa_delete_dflt_flt_rules(clnt_hdl);
1566 }
1567
1568 if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
1569 atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt);
1570
1571 memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats));
1572
1573 if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
1574 ipa_cleanup_wlan_rx_common_cache();
1575
1576 ep->valid = 0;
1577 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
1578
1579 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
1580
1581 return 0;
1582}
1583
1584/**
1585 * ipa_tx_comp_usr_notify_release() - Callback function which will call the
1586 * user supplied callback function to release the skb, or release it on
1587 * its own if no callback function was supplied.
1588 * @user1
1589 * @user2
1590 *
1591 * This notified callback is for the destination client.
1592 * This function is supplied in ipa_connect.
1593 */
1594static void ipa_tx_comp_usr_notify_release(void *user1, int user2)
1595{
1596 struct sk_buff *skb = (struct sk_buff *)user1;
1597 int ep_idx = user2;
1598
1599 IPADBG("skb=%p ep=%d\n", skb, ep_idx);
1600
1601 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl);
1602
1603 if (ipa_ctx->ep[ep_idx].client_notify)
1604 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
1605 IPA_WRITE_DONE, (unsigned long)skb);
1606 else
1607 dev_kfree_skb_any(skb);
1608}
1609
1610static void ipa_tx_cmd_comp(void *user1, int user2)
1611{
1612 kfree(user1);
1613}
1614
1615/**
1616 * ipa2_tx_dp() - Data-path tx handler
1617 * @dst: [in] which IPA destination to route tx packets to
1618 * @skb: [in] the packet to send
1619 * @metadata: [in] TX packet meta-data
1620 *
1621 * Data-path tx handler, this is used for both SW data-path which by-passes most
1622 * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
1623 * dst is a "valid" CONS type, then SW data-path is used. If dst is the
1624 * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
1625 * is an error. For errors, client needs to free the skb as needed. For success,
1626 * IPA driver will later invoke client callback if one was supplied. That
1627 * callback should free the skb. If no callback supplied, IPA driver will free
1628 * the skb internally
1629 *
1630 * The function will use two descriptors for this send command
1631 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
1632 * the first descriptor will be used to inform the IPA hardware that
1633 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
1634 * Once this send was done from SPS point-of-view the IPA driver will
1635 * get notified by the supplied callback - ipa_sps_irq_tx_comp()
1636 *
1637 * ipa_sps_irq_tx_comp will call to the user supplied
1638 * callback (from ipa_connect)
1639 *
1640 * Returns: 0 on success, negative on failure
1641 */
1642int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
1643 struct ipa_tx_meta *meta)
1644{
1645 struct ipa_desc *desc;
1646 struct ipa_desc _desc[2];
1647 int dst_ep_idx;
1648 struct ipa_ip_packet_init *cmd;
1649 struct ipa_sys_context *sys;
1650 int src_ep_idx;
1651 int num_frags, f;
1652
1653 if (unlikely(!ipa_ctx)) {
1654 IPAERR("IPA driver was not initialized\n");
1655 return -EINVAL;
1656 }
1657
1658 if (skb->len == 0) {
1659 IPAERR("packet size is 0\n");
1660 return -EINVAL;
1661 }
1662
1663 num_frags = skb_shinfo(skb)->nr_frags;
1664 if (num_frags) {
1665 /* 1 desc is needed for the linear portion of skb;
1666 * 1 desc may be needed for the PACKET_INIT;
1667 * 1 desc for each frag
1668 */
1669 desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC);
1670 if (!desc) {
1671 IPAERR("failed to alloc desc array\n");
1672 goto fail_mem;
1673 }
1674 } else {
1675 memset(_desc, 0, 2 * sizeof(struct ipa_desc));
1676 desc = &_desc[0];
1677 }
1678
1679 /*
1680 * USB_CONS: PKT_INIT ep_idx = dst pipe
1681 * Q6_CONS: PKT_INIT ep_idx = sender pipe
1682 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
1683 *
1684 * LAN TX: all PKT_INIT
1685 * WAN TX: PKT_INIT (cmd) + HW (data)
1686 *
1687 */
1688 if (IPA_CLIENT_IS_CONS(dst)) {
1689 src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1690 if (-1 == src_ep_idx) {
1691 IPAERR("Client %u is not mapped\n",
1692 IPA_CLIENT_APPS_LAN_WAN_PROD);
1693 goto fail_gen;
1694 }
1695 dst_ep_idx = ipa2_get_ep_mapping(dst);
1696 } else {
1697 src_ep_idx = ipa2_get_ep_mapping(dst);
1698 if (-1 == src_ep_idx) {
1699 IPAERR("Client %u is not mapped\n", dst);
1700 goto fail_gen;
1701 }
1702 if (meta && meta->pkt_init_dst_ep_valid)
1703 dst_ep_idx = meta->pkt_init_dst_ep;
1704 else
1705 dst_ep_idx = -1;
1706 }
1707
1708 sys = ipa_ctx->ep[src_ep_idx].sys;
1709
1710 if (!sys->ep->valid) {
1711 IPAERR("pipe not valid\n");
1712 goto fail_gen;
1713 }
1714
1715 if (dst_ep_idx != -1) {
1716 /* SW data path */
1717 cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
1718 if (!cmd) {
1719 IPAERR("failed to alloc immediate command object\n");
1720 goto fail_gen;
1721 }
1722
1723 cmd->destination_pipe_index = dst_ep_idx;
1724 desc[0].opcode = IPA_IP_PACKET_INIT;
1725 desc[0].pyld = cmd;
1726 desc[0].len = sizeof(struct ipa_ip_packet_init);
1727 desc[0].type = IPA_IMM_CMD_DESC;
1728 desc[0].callback = ipa_tx_cmd_comp;
1729 desc[0].user1 = cmd;
1730 desc[1].pyld = skb->data;
1731 desc[1].len = skb_headlen(skb);
1732 desc[1].type = IPA_DATA_DESC_SKB;
1733 desc[1].callback = ipa_tx_comp_usr_notify_release;
1734 desc[1].user1 = skb;
1735 desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid &&
1736 meta->pkt_init_dst_ep_remote) ?
1737 src_ep_idx :
1738 dst_ep_idx;
1739 if (meta && meta->dma_address_valid) {
1740 desc[1].dma_address_valid = true;
1741 desc[1].dma_address = meta->dma_address;
1742 }
1743
1744 for (f = 0; f < num_frags; f++) {
1745 desc[2+f].frag = &skb_shinfo(skb)->frags[f];
1746 desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
1747 desc[2+f].len = skb_frag_size(desc[2+f].frag);
1748 }
1749
1750 /* don't free skb till frag mappings are released */
1751 if (num_frags) {
1752 desc[2+f-1].callback = desc[1].callback;
1753 desc[2+f-1].user1 = desc[1].user1;
1754 desc[2+f-1].user2 = desc[1].user2;
1755 desc[1].callback = NULL;
1756 }
1757
1758 if (ipa_send(sys, num_frags + 2, desc, true)) {
1759 IPAERR("fail to send skb %p num_frags %u SWP\n",
1760 skb, num_frags);
1761 goto fail_send;
1762 }
1763 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
1764 } else {
1765 /* HW data path */
1766 desc[0].pyld = skb->data;
1767 desc[0].len = skb_headlen(skb);
1768 desc[0].type = IPA_DATA_DESC_SKB;
1769 desc[0].callback = ipa_tx_comp_usr_notify_release;
1770 desc[0].user1 = skb;
1771 desc[0].user2 = src_ep_idx;
1772
1773 if (meta && meta->dma_address_valid) {
1774 desc[0].dma_address_valid = true;
1775 desc[0].dma_address = meta->dma_address;
1776 }
1777
1778 if (num_frags == 0) {
1779 if (ipa_send_one(sys, desc, true)) {
1780 IPAERR("fail to send skb %p HWP\n", skb);
1781 goto fail_gen;
1782 }
1783 } else {
1784 for (f = 0; f < num_frags; f++) {
1785 desc[1+f].frag = &skb_shinfo(skb)->frags[f];
1786 desc[1+f].type = IPA_DATA_DESC_SKB_PAGED;
1787 desc[1+f].len = skb_frag_size(desc[1+f].frag);
1788 }
1789
1790 /* don't free skb till frag mappings are released */
1791 desc[1+f-1].callback = desc[0].callback;
1792 desc[1+f-1].user1 = desc[0].user1;
1793 desc[1+f-1].user2 = desc[0].user2;
1794 desc[0].callback = NULL;
1795
1796 if (ipa_send(sys, num_frags + 1, desc, true)) {
1797 IPAERR("fail to send skb %p num_frags %u HWP\n",
1798 skb, num_frags);
1799 goto fail_gen;
1800 }
1801 }
1802
1803 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
1804 }
1805
1806 if (num_frags) {
1807 kfree(desc);
1808 IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
1809 }
1810
1811 return 0;
1812
1813fail_send:
1814 kfree(cmd);
1815fail_gen:
1816 if (num_frags)
1817 kfree(desc);
1818fail_mem:
1819 return -EFAULT;
1820}
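/*
 * Illustrative LAN TX sketch (the destination and error handling are
 * examples only): the caller hands the skb to ipa2_tx_dp() and keeps
 * ownership only on failure; on success the skb is released later from the
 * client_notify IPA_WRITE_DONE callback, or by the IPA driver itself when no
 * callback was registered.
 *
 *	if (ipa2_tx_dp(IPA_CLIENT_APPS_LAN_CONS, skb, NULL))
 *		dev_kfree_skb_any(skb);
 *	return NETDEV_TX_OK;
 */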
1821
1822static void ipa_wq_handle_rx(struct work_struct *work)
1823{
1824 struct ipa_sys_context *sys;
1825
1826 sys = container_of(work, struct ipa_sys_context, work);
1827
1828 if (sys->ep->napi_enabled) {
1829 IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
1830 sys->ep->client_notify(sys->ep->priv,
1831 IPA_CLIENT_START_POLL, 0);
1832 } else
1833 ipa_handle_rx(sys);
1834}
1835
1836static void ipa_wq_repl_rx(struct work_struct *work)
1837{
1838 struct ipa_sys_context *sys;
1839 void *ptr;
1840 struct ipa_rx_pkt_wrapper *rx_pkt;
1841 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
1842 u32 next;
1843 u32 curr;
1844
1845 sys = container_of(work, struct ipa_sys_context, repl_work);
1846 curr = atomic_read(&sys->repl.tail_idx);
1847
1848begin:
1849 while (1) {
1850 next = (curr + 1) % sys->repl.capacity;
1851 if (next == atomic_read(&sys->repl.head_idx))
1852 goto fail_kmem_cache_alloc;
1853
1854 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
1855 flag);
1856 if (!rx_pkt) {
1857 pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
1858 __func__, sys);
1859 goto fail_kmem_cache_alloc;
1860 }
1861
1862 INIT_LIST_HEAD(&rx_pkt->link);
1863 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
1864 rx_pkt->sys = sys;
1865
1866 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
1867 if (rx_pkt->data.skb == NULL) {
1868 pr_err_ratelimited("%s fail alloc skb sys=%p\n",
1869 __func__, sys);
1870 goto fail_skb_alloc;
1871 }
1872 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
1873 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
1874 sys->rx_buff_sz,
1875 DMA_FROM_DEVICE);
1876 if (rx_pkt->data.dma_addr == 0 ||
1877 rx_pkt->data.dma_addr == ~0) {
1878 pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
1879 __func__, (void *)rx_pkt->data.dma_addr,
1880 ptr, sys);
1881 goto fail_dma_mapping;
1882 }
1883
1884 sys->repl.cache[curr] = rx_pkt;
1885 curr = next;
1886 /* ensure write is done before setting tail index */
1887 mb();
1888 atomic_set(&sys->repl.tail_idx, next);
1889 }
1890
1891 return;
1892
1893fail_dma_mapping:
1894 sys->free_skb(rx_pkt->data.skb);
1895fail_skb_alloc:
1896 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1897fail_kmem_cache_alloc:
1898 if (atomic_read(&sys->repl.tail_idx) ==
1899 atomic_read(&sys->repl.head_idx)) {
1900 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
1901 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty);
1902 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
1903 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty);
1904 else
1905 WARN_ON(1);
1906 pr_err_ratelimited("%s sys=%p repl ring empty\n",
1907 __func__, sys);
1908 goto begin;
1909 }
1910}
1911
1912static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys)
1913{
1914 struct ipa_rx_pkt_wrapper *rx_pkt = NULL;
1915 struct ipa_rx_pkt_wrapper *tmp;
1916 int ret;
1917 u32 rx_len_cached = 0;
1918
1919 IPADBG("\n");
1920
1921 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1922 rx_len_cached = sys->len;
1923
1924 if (rx_len_cached < sys->rx_pool_sz) {
1925 list_for_each_entry_safe(rx_pkt, tmp,
1926 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1927 list_del(&rx_pkt->link);
1928
1929 if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0)
1930 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1931
1932 INIT_LIST_HEAD(&rx_pkt->link);
1933 rx_pkt->len = 0;
1934 rx_pkt->sys = sys;
1935
1936 ret = sps_transfer_one(sys->ep->ep_hdl,
1937 rx_pkt->data.dma_addr,
1938 IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
1939
1940 if (ret) {
1941 IPAERR("sps_transfer_one failed %d\n", ret);
1942 goto fail_sps_transfer;
1943 }
1944
1945 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
1946 rx_len_cached = ++sys->len;
1947
1948 if (rx_len_cached >= sys->rx_pool_sz) {
1949 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1950 return;
1951 }
1952 }
1953 }
1954 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1955
1956 if (rx_len_cached < sys->rx_pool_sz &&
1957 ipa_ctx->wc_memb.wlan_comm_total_cnt <
1958 IPA_WLAN_COMM_RX_POOL_HIGH) {
1959 ipa_replenish_rx_cache(sys);
1960 ipa_ctx->wc_memb.wlan_comm_total_cnt +=
1961 (sys->rx_pool_sz - rx_len_cached);
1962 }
1963
1964 return;
1965
1966fail_sps_transfer:
1967 list_del(&rx_pkt->link);
1968 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1969}
1970
1971static void ipa_cleanup_wlan_rx_common_cache(void)
1972{
1973 struct ipa_rx_pkt_wrapper *rx_pkt;
1974 struct ipa_rx_pkt_wrapper *tmp;
1975
1976	spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1977
1978	list_for_each_entry_safe(rx_pkt, tmp,
1979 &ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
1980 list_del(&rx_pkt->link);
1981 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
1982			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
1983 dev_kfree_skb_any(rx_pkt->data.skb);
1984 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
1985 ipa_ctx->wc_memb.wlan_comm_free_cnt--;
1986 ipa_ctx->wc_memb.wlan_comm_total_cnt--;
1987 }
1988 ipa_ctx->wc_memb.total_tx_pkts_freed = 0;
1989
1990 if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0)
1991 IPAERR("wlan comm buff free cnt: %d\n",
1992 ipa_ctx->wc_memb.wlan_comm_free_cnt);
1993
1994 if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0)
1995 IPAERR("wlan comm buff total cnt: %d\n",
1996 ipa_ctx->wc_memb.wlan_comm_total_cnt);
1997
1998	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
1999
2000}
2001
2002static void ipa_alloc_wlan_rx_common_cache(u32 size)
2003{
2004 void *ptr;
2005 struct ipa_rx_pkt_wrapper *rx_pkt;
2006 int rx_len_cached = 0;
2007 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2008 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2009
2010 rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt;
2011 while (rx_len_cached < size) {
2012 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2013 flag);
2014 if (!rx_pkt) {
2015 IPAERR("failed to alloc rx wrapper\n");
2016 goto fail_kmem_cache_alloc;
2017 }
2018
2019 INIT_LIST_HEAD(&rx_pkt->link);
2020 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2021
2022 rx_pkt->data.skb =
2023 ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
2024 flag);
2025 if (rx_pkt->data.skb == NULL) {
2026 IPAERR("failed to alloc skb\n");
2027 goto fail_skb_alloc;
2028 }
2029 ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
2030 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2031 IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
2032 if (rx_pkt->data.dma_addr == 0 ||
2033 rx_pkt->data.dma_addr == ~0) {
2034 IPAERR("dma_map_single failure %p for %p\n",
2035 (void *)rx_pkt->data.dma_addr, ptr);
2036 goto fail_dma_mapping;
2037 }
2038
2039 list_add_tail(&rx_pkt->link,
2040 &ipa_ctx->wc_memb.wlan_comm_desc_list);
2041 rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
2042
2043 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
2044
2045 }
2046
2047 return;
2048
2049fail_dma_mapping:
2050 dev_kfree_skb_any(rx_pkt->data.skb);
2051fail_skb_alloc:
2052 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2053fail_kmem_cache_alloc:
2054 return;
2055}
2056
2057
2058/**
2059 * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
2060 *
2061 * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
2062 * are sys->rx_pool_sz buffers in the cache.
2063 * - Allocate a buffer in the cache
2064 * - Initialize the packet's link
2065 * - Initialize the packet's work struct
2066 * - Allocate the packet's socket buffer (skb)
2067 * - Fill the packet's skb with data
2068 * - Make the packet DMAable
2069 * - Add the packet to the system pipe linked list
2070 * - Initiate a SPS transfer so that SPS driver will use this packet later.
2071 */
2072static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
2073{
2074 void *ptr;
2075 struct ipa_rx_pkt_wrapper *rx_pkt;
2076 int ret;
2077 int rx_len_cached = 0;
2078 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2079 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2080
2081 rx_len_cached = sys->len;
2082
2083 while (rx_len_cached < sys->rx_pool_sz) {
2084 rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
2085 flag);
2086 if (!rx_pkt) {
2087 IPAERR("failed to alloc rx wrapper\n");
2088 goto fail_kmem_cache_alloc;
2089 }
2090
2091 INIT_LIST_HEAD(&rx_pkt->link);
2092 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2093 rx_pkt->sys = sys;
2094
2095 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
2096 if (rx_pkt->data.skb == NULL) {
2097 IPAERR("failed to alloc skb\n");
2098 goto fail_skb_alloc;
2099 }
2100 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2101 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
2102 sys->rx_buff_sz,
2103 DMA_FROM_DEVICE);
2104 if (rx_pkt->data.dma_addr == 0 ||
2105 rx_pkt->data.dma_addr == ~0) {
2106 IPAERR("dma_map_single failure %p for %p\n",
2107 (void *)rx_pkt->data.dma_addr, ptr);
2108 goto fail_dma_mapping;
2109 }
2110
2111 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2112 rx_len_cached = ++sys->len;
2113
2114 ret = sps_transfer_one(sys->ep->ep_hdl,
2115 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2116
2117 if (ret) {
2118 IPAERR("sps_transfer_one failed %d\n", ret);
2119 goto fail_sps_transfer;
2120 }
2121 }
2122
2123 return;
2124
2125fail_sps_transfer:
2126 list_del(&rx_pkt->link);
2127 rx_len_cached = --sys->len;
2128 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2129 sys->rx_buff_sz, DMA_FROM_DEVICE);
2130fail_dma_mapping:
2131 sys->free_skb(rx_pkt->data.skb);
2132fail_skb_alloc:
2133 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2134fail_kmem_cache_alloc:
2135 if (rx_len_cached == 0)
2136 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2137 msecs_to_jiffies(1));
2138}
2139
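/*
 * Recycling variant of ipa_replenish_rx_cache(): instead of allocating
 * fresh skbs it re-arms buffers that the client returned through
 * ipa2_recycle_wan_skb(), taking them from sys->rcycl_list. Selected in
 * ipa_assign_policy_v2() when both NAPI and recycling are enabled.
 */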
2140static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
2141{
2142 void *ptr;
2143 struct ipa_rx_pkt_wrapper *rx_pkt;
2144 int ret;
2145 int rx_len_cached = 0;
2146
2147 rx_len_cached = sys->len;
2148
2149 while (rx_len_cached < sys->rx_pool_sz) {
2150 spin_lock_bh(&sys->spinlock);
2151 if (list_empty(&sys->rcycl_list))
2152 goto fail_kmem_cache_alloc;
2153
2154 rx_pkt = list_first_entry(&sys->rcycl_list,
2155 struct ipa_rx_pkt_wrapper, link);
2156 list_del(&rx_pkt->link);
2157 spin_unlock_bh(&sys->spinlock);
2158 INIT_LIST_HEAD(&rx_pkt->link);
2159 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
2160 rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
2161 ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
2162 if (rx_pkt->data.dma_addr == 0 ||
2163 rx_pkt->data.dma_addr == ~0)
2164 goto fail_dma_mapping;
2165
2166 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2167 rx_len_cached = ++sys->len;
2168
2169 ret = sps_transfer_one(sys->ep->ep_hdl,
2170 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2171
2172 if (ret) {
2173 IPAERR("sps_transfer_one failed %d\n", ret);
2174 goto fail_sps_transfer;
2175 }
2176 }
2177
2178 return;
2179fail_sps_transfer:
2180 rx_len_cached = --sys->len;
2181 list_del(&rx_pkt->link);
2182 INIT_LIST_HEAD(&rx_pkt->link);
2183 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2184 sys->rx_buff_sz, DMA_FROM_DEVICE);
2185fail_dma_mapping:
2186 spin_lock_bh(&sys->spinlock);
2187 list_add_tail(&rx_pkt->link, &sys->rcycl_list);
2190fail_kmem_cache_alloc:
2191	spin_unlock_bh(&sys->spinlock);
2192 if (rx_len_cached == 0)
2193 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2194 msecs_to_jiffies(1));
2195}
2196
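/*
 * Consumer side of sys->repl: hands pre-built rx wrappers from the ring
 * (at repl.head_idx) to SPS via sps_transfer_one(). The producer work is
 * kicked when the ring runs dry and every repl_trig_thresh submissions;
 * if the pipe drops to or below rx_replenish_threshold, the delayed
 * replenish work is scheduled as a fallback.
 */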
2197static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
2198{
2199 struct ipa_rx_pkt_wrapper *rx_pkt;
2200 int ret;
2201 int rx_len_cached = 0;
2202 u32 curr;
2203
2204 rx_len_cached = sys->len;
2205 curr = atomic_read(&sys->repl.head_idx);
2206
2207 while (rx_len_cached < sys->rx_pool_sz) {
2208 if (curr == atomic_read(&sys->repl.tail_idx)) {
2209 queue_work(sys->repl_wq, &sys->repl_work);
2210 break;
2211 }
2212
2213 rx_pkt = sys->repl.cache[curr];
2214 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
2215
2216 ret = sps_transfer_one(sys->ep->ep_hdl,
2217 rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
2218
2219 if (ret) {
2220 IPAERR("sps_transfer_one failed %d\n", ret);
2221 list_del(&rx_pkt->link);
2222 break;
2223 }
2224 rx_len_cached = ++sys->len;
2225 sys->repl_trig_cnt++;
2226 curr = (curr + 1) % sys->repl.capacity;
2227 /* ensure write is done before setting head index */
2228 mb();
2229 atomic_set(&sys->repl.head_idx, curr);
2230 }
2231
2232 if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0)
2233 queue_work(sys->repl_wq, &sys->repl_work);
2234
2235 if (rx_len_cached <= sys->ep->rx_replenish_threshold) {
2236 if (rx_len_cached == 0) {
2237 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
2238 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty);
2239 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
2240 IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty);
2241 else
2242 WARN_ON(1);
2243 }
2244 sys->repl_trig_cnt = 0;
2245 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
2246 msecs_to_jiffies(1));
2247 }
2248}
2249
2250static void replenish_rx_work_func(struct work_struct *work)
2251{
2252 struct delayed_work *dwork;
2253 struct ipa_sys_context *sys;
2254
2255 dwork = container_of(work, struct delayed_work, work);
2256 sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
2257 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2258 sys->repl_hdlr(sys);
2259 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2260}
2261
2262/**
2263 * ipa_cleanup_rx() - release RX queue resources
2264 *
2265 */
2266static void ipa_cleanup_rx(struct ipa_sys_context *sys)
2267{
2268 struct ipa_rx_pkt_wrapper *rx_pkt;
2269 struct ipa_rx_pkt_wrapper *r;
2270 u32 head;
2271 u32 tail;
2272
2273 list_for_each_entry_safe(rx_pkt, r,
2274 &sys->head_desc_list, link) {
2275 list_del(&rx_pkt->link);
2276 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2277 sys->rx_buff_sz, DMA_FROM_DEVICE);
2278 sys->free_skb(rx_pkt->data.skb);
2279 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2280 }
2281
2282 list_for_each_entry_safe(rx_pkt, r,
2283 &sys->rcycl_list, link) {
2284 list_del(&rx_pkt->link);
2285 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2286 sys->rx_buff_sz, DMA_FROM_DEVICE);
2287 sys->free_skb(rx_pkt->data.skb);
2288 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2289 }
2290
2291 if (sys->repl.cache) {
2292 head = atomic_read(&sys->repl.head_idx);
2293 tail = atomic_read(&sys->repl.tail_idx);
2294 while (head != tail) {
2295 rx_pkt = sys->repl.cache[head];
2296 dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
2297 sys->rx_buff_sz, DMA_FROM_DEVICE);
2298 sys->free_skb(rx_pkt->data.skb);
2299 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
2300 head = (head + 1) % sys->repl.capacity;
2301 }
2302 kfree(sys->repl.cache);
2303 }
2304}
2305
2306static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
2307{
2308 struct sk_buff *skb2 = NULL;
2309
2310 skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
2311 if (likely(skb2)) {
2312 /* Set the data pointer */
2313 skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
2314 memcpy(skb2->data, skb->data, len);
2315 skb2->len = len;
2316 skb_set_tail_pointer(skb2, len);
2317 }
2318
2319 return skb2;
2320}
2321
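/*
 * LAN consumer payload handler. The aggregated buffer is parsed as a
 * sequence of ipa_hw_pkt_status records, each optionally followed by
 * packet data padded to 4 bytes plus a checksum/metadata trailer.
 * TX-completion, TAG-completion and aggregation-close statuses carry no
 * payload. A status or payload that straddles buffers is stitched back
 * together via sys->prev_skb, sys->len_partial, sys->len_rem and
 * sys->len_pad.
 */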
2322static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
2323 struct ipa_sys_context *sys)
2324{
2325 int rc = 0;
2326 struct ipa_hw_pkt_status *status;
2327 struct sk_buff *skb2;
2328 int pad_len_byte;
2329 int len;
2330 unsigned char *buf;
2331 int src_pipe;
2332 unsigned int used = *(unsigned int *)skb->cb;
2333 unsigned int used_align = ALIGN(used, 32);
2334 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2335	u32 skb2_len;
2336
2337 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2338
2339 if (skb->len == 0) {
2340 IPAERR("ZLT\n");
2341 sys->free_skb(skb);
2342 return rc;
2343 }
2344
2345 if (sys->len_partial) {
2346 IPADBG("len_partial %d\n", sys->len_partial);
2347 buf = skb_push(skb, sys->len_partial);
2348 memcpy(buf, sys->prev_skb->data, sys->len_partial);
2349 sys->len_partial = 0;
2350 sys->free_skb(sys->prev_skb);
2351 sys->prev_skb = NULL;
2352 goto begin;
2353 }
2354
2355 /* this pipe has TX comp (status only) + mux-ed LAN RX data
2356 * (status+data)
2357 */
2358 if (sys->len_rem) {
2359 IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
2360 sys->len_pad);
2361 if (sys->len_rem <= skb->len) {
2362 if (sys->prev_skb) {
2363 skb2 = skb_copy_expand(sys->prev_skb, 0,
2364 sys->len_rem, GFP_KERNEL);
2365 if (likely(skb2)) {
2366 memcpy(skb_put(skb2, sys->len_rem),
2367 skb->data, sys->len_rem);
2368 skb_trim(skb2,
2369 skb2->len - sys->len_pad);
2370 skb2->truesize = skb2->len +
2371 sizeof(struct sk_buff);
2372 if (sys->drop_packet)
2373 dev_kfree_skb_any(skb2);
2374 else
2375 sys->ep->client_notify(
2376 sys->ep->priv,
2377 IPA_RECEIVE,
2378 (unsigned long)(skb2));
2379 } else {
2380 IPAERR("copy expand failed\n");
2381 }
2382 dev_kfree_skb_any(sys->prev_skb);
2383 }
2384 skb_pull(skb, sys->len_rem);
2385 sys->prev_skb = NULL;
2386 sys->len_rem = 0;
2387 sys->len_pad = 0;
2388 } else {
2389 if (sys->prev_skb) {
2390 skb2 = skb_copy_expand(sys->prev_skb, 0,
2391 skb->len, GFP_KERNEL);
2392 if (likely(skb2)) {
2393 memcpy(skb_put(skb2, skb->len),
2394 skb->data, skb->len);
2395 } else {
2396 IPAERR("copy expand failed\n");
2397 }
2398 dev_kfree_skb_any(sys->prev_skb);
2399 sys->prev_skb = skb2;
2400 }
2401 sys->len_rem -= skb->len;
2402 sys->free_skb(skb);
2403 return rc;
2404 }
2405 }
2406
2407begin:
2408 while (skb->len) {
2409 sys->drop_packet = false;
2410 IPADBG("LEN_REM %d\n", skb->len);
2411
2412 if (skb->len < IPA_PKT_STATUS_SIZE) {
2413 WARN_ON(sys->prev_skb != NULL);
2414 IPADBG("status straddles buffer\n");
2415			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
2416			sys->len_partial = skb->len;
2417 return rc;
2418 }
2419
2420 status = (struct ipa_hw_pkt_status *)skb->data;
2421 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2422 status->status_opcode, status->endp_src_idx,
2423 status->endp_dest_idx, status->pkt_len);
2424 if (sys->status_stat) {
2425 sys->status_stat->status[sys->status_stat->curr] =
2426 *status;
2427 sys->status_stat->curr++;
2428 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2429 sys->status_stat->curr = 0;
2430 }
2431
2432 if (status->status_opcode !=
2433 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2434 status->status_opcode !=
2435 IPA_HW_STATUS_OPCODE_PACKET &&
2436 status->status_opcode !=
2437 IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET &&
2438 status->status_opcode !=
2439 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2440 IPAERR("unsupported opcode(%d)\n",
2441 status->status_opcode);
2442 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2443 continue;
2444 }
2445 IPA_STATS_EXCP_CNT(status->exception,
2446 ipa_ctx->stats.rx_excp_pkts);
2447 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2448 status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
2449 IPAERR("status fields invalid\n");
2450 IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
2451 status->status_opcode, status->endp_src_idx,
2452 status->endp_dest_idx, status->pkt_len);
2453 WARN_ON(1);
2454 BUG();
2455 }
2456 if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
2457 struct ipa_tag_completion *comp;
2458
2459 IPADBG("TAG packet arrived\n");
2460 if (status->tag_f_2 == IPA_COOKIE) {
2461 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2462 if (skb->len < sizeof(comp)) {
2463 IPAERR("TAG arrived without packet\n");
2464 return rc;
2465 }
2466 memcpy(&comp, skb->data, sizeof(comp));
2467 skb_pull(skb, sizeof(comp) +
2468 IPA_SIZE_DL_CSUM_META_TRAILER);
2469 complete(&comp->comp);
2470 if (atomic_dec_return(&comp->cnt) == 0)
2471 kfree(comp);
2472 continue;
2473 } else {
2474 IPADBG("ignoring TAG with wrong cookie\n");
2475 }
2476 }
2477 if (status->pkt_len == 0) {
2478 IPADBG("Skip aggr close status\n");
2479 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2480 IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close);
2481 IPA_STATS_DEC_CNT(
2482 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2483 continue;
2484 }
2485 if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
2486 /* RX data */
2487 src_pipe = status->endp_src_idx;
2488
2489 /*
2490 * A packet which is received back to the AP after
2491 * there was no route match.
2492 */
2493 if (!status->exception && !status->route_match)
2494 sys->drop_packet = true;
2495
2496 if (skb->len == IPA_PKT_STATUS_SIZE &&
2497 !status->exception) {
2498 WARN_ON(sys->prev_skb != NULL);
2499 IPADBG("Ins header in next buffer\n");
2500				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
2501				sys->len_partial = skb->len;
2502 return rc;
2503 }
2504
2505 pad_len_byte = ((status->pkt_len + 3) & ~3) -
2506 status->pkt_len;
2507
2508 len = status->pkt_len + pad_len_byte +
2509 IPA_SIZE_DL_CSUM_META_TRAILER;
2510 IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
2511 status->pkt_len, len);
2512
2513 if (status->exception ==
2514 IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
2515 IPADBG("Dropping packet on DeAggr Exception\n");
2516 sys->drop_packet = true;
2517 }
2518
2519			skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE;
2520 skb2_len = min(skb2_len, skb->len);
2521 skb2 = ipa_skb_copy_for_client(skb, skb2_len);
2522			if (likely(skb2)) {
2523 if (skb->len < len + IPA_PKT_STATUS_SIZE) {
2524 IPADBG("SPL skb len %d len %d\n",
2525 skb->len, len);
2526 sys->prev_skb = skb2;
2527 sys->len_rem = len - skb->len +
2528 IPA_PKT_STATUS_SIZE;
2529 sys->len_pad = pad_len_byte;
2530 skb_pull(skb, skb->len);
2531 } else {
2532 skb_trim(skb2, status->pkt_len +
2533 IPA_PKT_STATUS_SIZE);
2534 IPADBG("rx avail for %d\n",
2535 status->endp_dest_idx);
2536 if (sys->drop_packet) {
2537 dev_kfree_skb_any(skb2);
2538 } else if (status->pkt_len >
2539 IPA_GENERIC_AGGR_BYTE_LIMIT *
2540 1024) {
2541 IPAERR("packet size invalid\n");
2542 IPAERR("STATUS opcode=%d\n",
2543 status->status_opcode);
2544 IPAERR("src=%d dst=%d len=%d\n",
2545 status->endp_src_idx,
2546 status->endp_dest_idx,
2547 status->pkt_len);
2548 BUG();
2549 } else {
2550 skb2->truesize = skb2->len +
2551 sizeof(struct sk_buff) +
2552 (ALIGN(len +
2553 IPA_PKT_STATUS_SIZE, 32) *
2554 unused / used_align);
2555 sys->ep->client_notify(
2556 sys->ep->priv,
2557 IPA_RECEIVE,
2558 (unsigned long)(skb2));
2559 }
2560 skb_pull(skb, len +
2561 IPA_PKT_STATUS_SIZE);
2562 }
2563 } else {
2564 IPAERR("fail to alloc skb\n");
2565 if (skb->len < len) {
2566 sys->prev_skb = NULL;
2567 sys->len_rem = len - skb->len +
2568 IPA_PKT_STATUS_SIZE;
2569 sys->len_pad = pad_len_byte;
2570 skb_pull(skb, skb->len);
2571 } else {
2572 skb_pull(skb, len +
2573 IPA_PKT_STATUS_SIZE);
2574 }
2575 }
2576 /* TX comp */
2577 ipa_wq_write_done_status(src_pipe);
2578 IPADBG("tx comp imp for %d\n", src_pipe);
2579 } else {
2580 /* TX comp */
2581 ipa_wq_write_done_status(status->endp_src_idx);
2582 IPADBG("tx comp exp for %d\n", status->endp_src_idx);
2583 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2584 IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl);
2585 IPA_STATS_DEC_CNT(
2586 ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
2587 }
2588 };
2589
2590 sys->free_skb(skb);
2591 return rc;
2592}
2593
2594static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb,
2595 struct sk_buff *skb, unsigned int len)
2596{
2597 struct sk_buff *skb2;
2598
2599 skb2 = skb_copy_expand(prev_skb, 0,
2600 len, GFP_KERNEL);
2601 if (likely(skb2)) {
2602 memcpy(skb_put(skb2, len),
2603 skb->data, len);
2604 } else {
2605 IPAERR("copy expand failed\n");
2606 skb2 = NULL;
2607 }
2608 dev_kfree_skb_any(prev_skb);
2609
2610 return skb2;
2611}
2612
2613static void wan_rx_handle_splt_pyld(struct sk_buff *skb,
2614 struct ipa_sys_context *sys)
2615{
2616 struct sk_buff *skb2;
2617
2618 IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
2619 if (sys->len_rem <= skb->len) {
2620 if (sys->prev_skb) {
2621 skb2 = join_prev_skb(sys->prev_skb, skb,
2622 sys->len_rem);
2623 if (likely(skb2)) {
2624 IPADBG(
2625 "removing Status element from skb and sending to WAN client");
2626 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2627 skb2->truesize = skb2->len +
2628 sizeof(struct sk_buff);
2629 sys->ep->client_notify(sys->ep->priv,
2630 IPA_RECEIVE,
2631 (unsigned long)(skb2));
2632 }
2633 }
2634 skb_pull(skb, sys->len_rem);
2635 sys->prev_skb = NULL;
2636 sys->len_rem = 0;
2637 } else {
2638 if (sys->prev_skb) {
2639 skb2 = join_prev_skb(sys->prev_skb, skb,
2640 skb->len);
2641 sys->prev_skb = skb2;
2642 }
2643 sys->len_rem -= skb->len;
2644 skb_pull(skb, skb->len);
2645 }
2646}
2647
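/*
 * WAN consumer payload handler. Each frame consists of an
 * ipa_hw_pkt_status, a QMAP header whose (big-endian) length field gives
 * the padded payload size, the payload itself and, when the checksum
 * status bit is set, a DL checksum trailer. Frames split across buffers
 * are completed by wan_rx_handle_splt_pyld(). When
 * ipa_client_apps_wan_cons_agg_gro is set the whole buffer is handed to
 * the client as-is.
 */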
2648static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
2649 struct ipa_sys_context *sys)
2650{
2651 int rc = 0;
2652 struct ipa_hw_pkt_status *status;
2653 struct sk_buff *skb2;
2654 u16 pkt_len_with_pad;
2655 u32 qmap_hdr;
2656 int checksum_trailer_exists;
2657 int frame_len;
2658 int ep_idx;
2659 unsigned int used = *(unsigned int *)skb->cb;
2660 unsigned int used_align = ALIGN(used, 32);
2661 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2662
2663 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2664 if (skb->len == 0) {
2665 IPAERR("ZLT\n");
2666 goto bail;
2667 }
2668
2669 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
2670 sys->ep->client_notify(sys->ep->priv,
2671 IPA_RECEIVE, (unsigned long)(skb));
2672 return rc;
2673 }
2674 if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
2675 IPAERR("Recycle should enable only with GRO Aggr\n");
2676 ipa_assert();
2677 }
2678 /*
2679 * payload splits across 2 buff or more,
2680 * take the start of the payload from prev_skb
2681 */
2682 if (sys->len_rem)
2683 wan_rx_handle_splt_pyld(skb, sys);
2684
2685
2686 while (skb->len) {
2687 IPADBG("LEN_REM %d\n", skb->len);
2688 if (skb->len < IPA_PKT_STATUS_SIZE) {
2689 IPAERR("status straddles buffer\n");
2690 WARN_ON(1);
2691 goto bail;
2692 }
2693 status = (struct ipa_hw_pkt_status *)skb->data;
2694 IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
2695 status->status_opcode, status->endp_src_idx,
2696 status->endp_dest_idx, status->pkt_len);
2697
2698 if (sys->status_stat) {
2699 sys->status_stat->status[sys->status_stat->curr] =
2700 *status;
2701 sys->status_stat->curr++;
2702 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2703 sys->status_stat->curr = 0;
2704 }
2705
2706 if (status->status_opcode !=
2707 IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
2708 status->status_opcode !=
2709 IPA_HW_STATUS_OPCODE_PACKET &&
2710 status->status_opcode !=
2711 IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
2712 IPAERR("unsupported opcode\n");
2713 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2714 continue;
2715 }
2716 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2717 if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
2718 status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
2719 status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
2720 IPAERR("status fields invalid\n");
2721 WARN_ON(1);
2722 goto bail;
2723 }
2724 if (status->pkt_len == 0) {
2725 IPADBG("Skip aggr close status\n");
2726 skb_pull(skb, IPA_PKT_STATUS_SIZE);
2727 IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts);
2728 IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close);
2729 continue;
2730 }
2731 ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
2732 if (status->endp_dest_idx != ep_idx) {
2733 IPAERR("expected endp_dest_idx %d received %d\n",
2734 ep_idx, status->endp_dest_idx);
2735 WARN_ON(1);
2736 goto bail;
2737 }
2738 /* RX data */
2739 if (skb->len == IPA_PKT_STATUS_SIZE) {
2740 IPAERR("Ins header in next buffer\n");
2741 WARN_ON(1);
2742 goto bail;
2743 }
2744 qmap_hdr = *(u32 *)(status+1);
2745 /*
2746 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
2747 * header
2748 */
2749
2750 /*QMAP is BE: convert the pkt_len field from BE to LE*/
2751 pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
2752 IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
2753 /*get the CHECKSUM_PROCESS bit*/
2754 checksum_trailer_exists = status->status_mask &
2755 IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
2756 IPADBG("checksum_trailer_exists %d\n",
2757 checksum_trailer_exists);
2758
2759 frame_len = IPA_PKT_STATUS_SIZE +
2760 IPA_QMAP_HEADER_LENGTH +
2761 pkt_len_with_pad;
2762 if (checksum_trailer_exists)
2763 frame_len += IPA_DL_CHECKSUM_LENGTH;
2764 IPADBG("frame_len %d\n", frame_len);
2765
2766 skb2 = skb_clone(skb, GFP_KERNEL);
2767 if (likely(skb2)) {
2768 /*
2769 * the len of actual data is smaller than expected
2770 * payload split across 2 buff
2771 */
2772 if (skb->len < frame_len) {
2773 IPADBG("SPL skb len %d len %d\n",
2774 skb->len, frame_len);
2775 sys->prev_skb = skb2;
2776 sys->len_rem = frame_len - skb->len;
2777 skb_pull(skb, skb->len);
2778 } else {
2779 skb_trim(skb2, frame_len);
2780 IPADBG("rx avail for %d\n",
2781 status->endp_dest_idx);
2782 IPADBG(
2783 "removing Status element from skb and sending to WAN client");
2784 skb_pull(skb2, IPA_PKT_STATUS_SIZE);
2785 skb2->truesize = skb2->len +
2786 sizeof(struct sk_buff) +
2787 (ALIGN(frame_len, 32) *
2788 unused / used_align);
2789 sys->ep->client_notify(sys->ep->priv,
2790 IPA_RECEIVE, (unsigned long)(skb2));
2791 skb_pull(skb, frame_len);
2792 }
2793 } else {
2794 IPAERR("fail to clone\n");
2795 if (skb->len < frame_len) {
2796 sys->prev_skb = NULL;
2797 sys->len_rem = frame_len - skb->len;
2798 skb_pull(skb, skb->len);
2799 } else {
2800 skb_pull(skb, frame_len);
2801 }
2802 }
2803 };
2804bail:
2805 sys->free_skb(skb);
2806 return rc;
2807}
2808
2809static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys)
2810{
2811 struct ipa_a5_mux_hdr *mux_hdr;
2812 unsigned int pull_len;
2813 unsigned int padding;
2814 struct ipa_ep_context *ep;
2815 unsigned int src_pipe;
2816
2817 mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
2818
2819 src_pipe = mux_hdr->src_pipe_index;
2820
2821 IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
2822 rx_skb->len, ntohs(mux_hdr->interface_id),
2823 src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));
2824
2825 IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
2826
2827 IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
2828 IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
2829
2830 /*
2831 * Any packets arriving over AMPDU_TX should be dispatched
2832 * to the regular WLAN RX data-path.
2833 */
2834 if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
2835 src_pipe = WLAN_PROD_TX_EP;
2836
2837 ep = &ipa_ctx->ep[src_pipe];
2838 spin_lock(&ipa_ctx->disconnect_lock);
2839 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2840 !ep->valid || !ep->client_notify)) {
2841 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2842 src_pipe, ep->valid, ep->client_notify);
2843 dev_kfree_skb_any(rx_skb);
2844 spin_unlock(&ipa_ctx->disconnect_lock);
2845 return 0;
2846 }
2847
2848 pull_len = sizeof(struct ipa_a5_mux_hdr);
2849
2850 /*
2851 * IP packet starts on word boundary
2852 * remove the MUX header and any padding and pass the frame to
2853 * the client which registered a rx callback on the "src pipe"
2854 */
2855 padding = ep->cfg.hdr.hdr_len & 0x3;
2856 if (padding)
2857 pull_len += 4 - padding;
2858
2859 IPADBG("pulling %d bytes from skb\n", pull_len);
2860 skb_pull(rx_skb, pull_len);
2861 ep->client_notify(ep->priv, IPA_RECEIVE,
2862 (unsigned long)(rx_skb));
2863 spin_unlock(&ipa_ctx->disconnect_lock);
2864 return 0;
2865}
2866
2867static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags)
2868{
2869 return __dev_alloc_skb(len, flags);
2870}
2871
2872static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len,
2873 gfp_t flags)
2874{
2875 struct sk_buff *skb;
2876
2877 skb = __dev_alloc_skb(len + IPA_HEADROOM, flags);
2878 if (skb)
2879 skb_reserve(skb, IPA_HEADROOM);
2880
2881 return skb;
2882}
2883
2884static void ipa_free_skb_rx(struct sk_buff *skb)
2885{
2886 dev_kfree_skb_any(skb);
2887}
2888
2889void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
2890{
2891 struct sk_buff *rx_skb = (struct sk_buff *)data;
2892 struct ipa_hw_pkt_status *status;
2893 struct ipa_ep_context *ep;
2894 unsigned int src_pipe;
2895 u32 metadata;
2896
2897 status = (struct ipa_hw_pkt_status *)rx_skb->data;
2898 src_pipe = status->endp_src_idx;
2899 metadata = status->metadata;
2900 ep = &ipa_ctx->ep[src_pipe];
2901 if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
2902 !ep->valid ||
2903 !ep->client_notify)) {
2904 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2905 src_pipe, ep->valid, ep->client_notify);
2906 dev_kfree_skb_any(rx_skb);
2907 return;
2908 }
2909 if (!status->exception)
2910 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE +
2911 IPA_LAN_RX_HEADER_LENGTH);
2912 else
2913 skb_pull(rx_skb, IPA_PKT_STATUS_SIZE);
2914
2915 /*
2916 * Metadata Info
2917 * ------------------------------------------
2918 * | 3 | 2 | 1 | 0 |
2919 * | fw_desc | vdev_id | qmap mux id | Resv |
2920 * ------------------------------------------
2921 */
2922 *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
2923 IPADBG("meta_data: 0x%x cb: 0x%x\n",
2924 metadata, *(u32 *)rx_skb->cb);
2925
2926 ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
2927}
2928
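/*
 * ipa2_recycle_wan_skb() - return a WAN rx skb for reuse. The skb is
 * reset, its IPA headroom is restored and it is queued on the endpoint's
 * rcycl_list, from where ipa_replenish_rx_cache_recycle() re-arms it
 * instead of allocating a new buffer.
 */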
2929void ipa2_recycle_wan_skb(struct sk_buff *skb)
2930{
2931 struct ipa_rx_pkt_wrapper *rx_pkt;
2932 int ep_idx = ipa2_get_ep_mapping(
2933 IPA_CLIENT_APPS_WAN_CONS);
2934 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
2935 (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
2936
2937 if (unlikely(ep_idx == -1)) {
2938 IPAERR("dest EP does not exist\n");
2939 ipa_assert();
2940 }
2941
2942 rx_pkt = kmem_cache_zalloc(
2943 ipa_ctx->rx_pkt_wrapper_cache, flag);
2944 if (!rx_pkt)
2945 ipa_assert();
2946
2947 INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
2948 rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
2949
2950 rx_pkt->data.skb = skb;
2951 rx_pkt->data.dma_addr = 0;
2952 ipa_skb_recycle(rx_pkt->data.skb);
2953 skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
2954 INIT_LIST_HEAD(&rx_pkt->link);
2955 spin_lock_bh(&rx_pkt->sys->spinlock);
2956 list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
2957 spin_unlock_bh(&rx_pkt->sys->spinlock);
2958}
2959
2960static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2961{
2962 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2963 struct sk_buff *rx_skb;
2964
2965 if (unlikely(list_empty(&sys->head_desc_list))) {
2966 WARN_ON(1);
2967 return;
2968 }
2969 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2970 struct ipa_rx_pkt_wrapper,
2971 link);
2972 list_del(&rx_pkt_expected->link);
2973 sys->len--;
2974 if (size)
2975 rx_pkt_expected->len = size;
2976 rx_skb = rx_pkt_expected->data.skb;
2977 dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
2978 sys->rx_buff_sz, DMA_FROM_DEVICE);
2979 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2980 rx_skb->len = rx_pkt_expected->len;
2981 *(unsigned int *)rx_skb->cb = rx_skb->len;
2982 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2983 sys->pyld_hdlr(rx_skb, sys);
2984 sys->repl_hdlr(sys);
2985 kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
2986
2987}
2988
2989static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
2990{
2991 struct ipa_rx_pkt_wrapper *rx_pkt_expected;
2992 struct sk_buff *rx_skb;
2993
2994 if (unlikely(list_empty(&sys->head_desc_list))) {
2995 WARN_ON(1);
2996 return;
2997 }
2998 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2999 struct ipa_rx_pkt_wrapper,
3000 link);
3001 list_del(&rx_pkt_expected->link);
3002 sys->len--;
3003
3004 if (size)
3005 rx_pkt_expected->len = size;
3006
3007 rx_skb = rx_pkt_expected->data.skb;
3008 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
3009 rx_skb->len = rx_pkt_expected->len;
3010 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
3011 sys->ep->wstats.tx_pkts_rcvd++;
3012 if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
3013 ipa2_free_skb(&rx_pkt_expected->data);
3014 sys->ep->wstats.tx_pkts_dropped++;
3015 } else {
3016 sys->ep->wstats.tx_pkts_sent++;
3017 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3018 (unsigned long)(&rx_pkt_expected->data));
3019 }
3020 ipa_replenish_wlan_rx_cache(sys);
3021}
3022
3023static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
3024 struct sps_iovec *iovec)
3025{
3026 IPADBG("ENTER.\n");
3027 if (unlikely(list_empty(&sys->head_desc_list))) {
3028 IPAERR("descriptor list is empty!\n");
3029 WARN_ON(1);
3030 return;
3031 }
3032 if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) {
3033 IPAERR("received unexpected event. sps flag is 0x%x\n"
3034 , iovec->flags);
3035 WARN_ON(1);
3036 return;
3037 }
3038 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3039 (unsigned long)(iovec));
3040 IPADBG("EXIT\n");
3041}
3042
3043static void ipa_wq_rx_avail(struct work_struct *work)
3044{
3045 struct ipa_rx_pkt_wrapper *rx_pkt;
3046 struct ipa_sys_context *sys;
3047
3048 rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work);
3049 if (unlikely(rx_pkt == NULL))
3050 WARN_ON(1);
3051 sys = rx_pkt->sys;
3052 ipa_wq_rx_common(sys, 0);
3053}
3054
3055/**
3056 * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
3057 * the SPS driver after a Rx operation is complete.
3058 * Called in an interrupt context.
3059 * @notify: SPS driver supplied notification struct
3060 *
3061 * This function defers the work for this event to a workqueue.
3062 */
3063void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
3064{
3065 struct ipa_rx_pkt_wrapper *rx_pkt;
3066
3067 switch (notify->event_id) {
3068 case SPS_EVENT_EOT:
3069 rx_pkt = notify->data.transfer.user;
3070 if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
3071 atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
3072 rx_pkt->len = notify->data.transfer.iovec.size;
3073 IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
3074 notify->user, rx_pkt->len);
3075 queue_work(rx_pkt->sys->wq, &rx_pkt->work);
3076 break;
3077 default:
3078 IPAERR("received unexpected event id %d sys=%p\n",
3079 notify->event_id, notify->user);
3080 }
3081}
3082
3083static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
3084 struct ipa_sys_context *sys)
3085{
3086 if (sys->ep->client_notify) {
3087 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
3088 (unsigned long)(rx_skb));
3089 } else {
3090 dev_kfree_skb_any(rx_skb);
3091 WARN_ON(1);
3092 }
3093
3094 return 0;
3095}
3096
3097static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
3098 struct ipa_sys_context *sys)
3099{
3100 unsigned long int aggr_byte_limit;
3101
3102 sys->ep->status.status_en = true;
3103 sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
3104 if (IPA_CLIENT_IS_PROD(in->client)) {
3105 if (!sys->ep->skip_ep_cfg) {
3106 sys->policy = IPA_POLICY_NOINTR_MODE;
3107 sys->sps_option = SPS_O_AUTO_ENABLE;
3108 sys->sps_callback = NULL;
3109 sys->ep->status.status_ep = ipa2_get_ep_mapping(
3110 IPA_CLIENT_APPS_LAN_CONS);
3111 if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
3112 sys->ep->status.status_en = false;
3113 } else {
3114 sys->policy = IPA_POLICY_INTR_MODE;
3115 sys->sps_option = (SPS_O_AUTO_ENABLE |
3116 SPS_O_EOT);
3117 sys->sps_callback =
3118 ipa_sps_irq_tx_no_aggr_notify;
3119 }
3120 return 0;
3121 }
3122
3123 aggr_byte_limit =
3124 (unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
3125 ipa_adjust_ra_buff_base_sz(
3126 in->ipa_ep_cfg.aggr.aggr_byte_limit));
3127
3128 if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
3129 in->client == IPA_CLIENT_APPS_WAN_CONS) {
3130 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3131 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3132 | SPS_O_ACK_TRANSFERS);
3133 sys->sps_callback = ipa_sps_irq_rx_notify;
3134 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3135 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3136 switch_to_intr_rx_work_func);
3137 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3138 replenish_rx_work_func);
3139 INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
3140 atomic_set(&sys->curr_polling_state, 0);
3141 sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
3142 IPA_GENERIC_RX_BUFF_BASE_SZ) -
3143 IPA_HEADROOM;
3144 sys->get_skb = ipa_get_skb_ipa_rx_headroom;
3145 sys->free_skb = ipa_free_skb_rx;
3146 in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
3147 in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
3148 in->ipa_ep_cfg.aggr.aggr_time_limit =
3149 IPA_GENERIC_AGGR_TIME_LIMIT;
3150 if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3151 sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
3152			sys->rx_pool_sz =
3153 ipa_ctx->lan_rx_ring_size;
3154			if (nr_cpu_ids > 1) {
3155 sys->repl_hdlr =
3156 ipa_fast_replenish_rx_cache;
3157 sys->repl_trig_thresh =
3158 sys->rx_pool_sz / 8;
3159 } else {
3160 sys->repl_hdlr =
3161 ipa_replenish_rx_cache;
3162 }
3163			in->ipa_ep_cfg.aggr.aggr_byte_limit =
3164 IPA_GENERIC_AGGR_BYTE_LIMIT;
3165 in->ipa_ep_cfg.aggr.aggr_pkt_limit =
3166 IPA_GENERIC_AGGR_PKT_LIMIT;
3167 sys->ep->wakelock_client =
3168 IPA_WAKELOCK_REF_CLIENT_LAN_RX;
3169 } else if (in->client ==
3170 IPA_CLIENT_APPS_WAN_CONS) {
3171 sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
3172			sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
3173 if (nr_cpu_ids > 1) {
3174				sys->repl_hdlr =
3175					ipa_fast_replenish_rx_cache;
3176 sys->repl_trig_thresh =
3177 sys->rx_pool_sz / 8;
3178			} else {
3179				sys->repl_hdlr =
3180 ipa_replenish_rx_cache;
3181 }
3182			if (in->napi_enabled && in->recycle_enabled)
3183 sys->repl_hdlr =
3184 ipa_replenish_rx_cache_recycle;
3185			sys->ep->wakelock_client =
3186 IPA_WAKELOCK_REF_CLIENT_WAN_RX;
3187 in->ipa_ep_cfg.aggr.aggr_sw_eof_active
3188 = true;
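			/*
			 * With WAN aggregation and GRO the rx buffer is sized
			 * to the closest power-of-two fit for the requested
			 * aggr_byte_limit (see ipa_adjust_ra_buff_base_sz())
			 * and the HW aggregation byte limit is then re-derived,
			 * in KB, from the buffer size actually chosen.
			 */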
3189 if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
3190 IPAERR("get close-by %u\n",
3191 ipa_adjust_ra_buff_base_sz(
3192 in->ipa_ep_cfg.aggr.
3193 aggr_byte_limit));
3194 IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
3195 /* disable ipa_status */
3196 sys->ep->status.
3197 status_en = false;
3198 sys->rx_buff_sz =
3199 IPA_GENERIC_RX_BUFF_SZ(
3200 ipa_adjust_ra_buff_base_sz(
3201 in->ipa_ep_cfg.aggr.
3202 aggr_byte_limit - IPA_HEADROOM));
3203 in->ipa_ep_cfg.aggr.
3204 aggr_byte_limit =
3205 sys->rx_buff_sz < in->
3206 ipa_ep_cfg.aggr.aggr_byte_limit ?
3207 IPA_ADJUST_AGGR_BYTE_LIMIT(
3208 sys->rx_buff_sz) :
3209 IPA_ADJUST_AGGR_BYTE_LIMIT(
3210 in->ipa_ep_cfg.
3211 aggr.aggr_byte_limit);
3212 IPAERR("set aggr_limit %lu\n",
3213 (unsigned long int)
3214 in->ipa_ep_cfg.aggr.
3215 aggr_byte_limit);
3216 } else {
3217 in->ipa_ep_cfg.aggr.
3218 aggr_byte_limit =
3219 IPA_GENERIC_AGGR_BYTE_LIMIT;
3220 in->ipa_ep_cfg.aggr.
3221 aggr_pkt_limit =
3222 IPA_GENERIC_AGGR_PKT_LIMIT;
3223 }
3224 }
3225 } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
3226 IPADBG("assigning policy to client:%d",
3227 in->client);
3228
3229 sys->ep->status.status_en = false;
3230 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3231 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3232 | SPS_O_ACK_TRANSFERS);
3233 sys->sps_callback = ipa_sps_irq_rx_notify;
3234 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3235 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3236 switch_to_intr_rx_work_func);
3237 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3238 replenish_rx_work_func);
3239 atomic_set(&sys->curr_polling_state, 0);
3240 sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
3241 sys->rx_pool_sz = in->desc_fifo_sz /
3242 sizeof(struct sps_iovec) - 1;
3243 if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
3244 sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
3245 sys->pyld_hdlr = NULL;
3246 sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
3247 sys->get_skb = ipa_get_skb_ipa_rx;
3248 sys->free_skb = ipa_free_skb_rx;
3249 in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
3250 sys->ep->wakelock_client =
3251 IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
3252 } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
3253 IPADBG("assigning policy to client:%d",
3254 in->client);
3255
3256 sys->ep->status.status_en = false;
3257 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3258 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3259 | SPS_O_ACK_TRANSFERS);
3260 sys->sps_callback = ipa_sps_irq_rx_notify;
3261 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3262 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3263 switch_to_intr_rx_work_func);
3264 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3265 replenish_rx_work_func);
3266 atomic_set(&sys->curr_polling_state, 0);
3267 sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
3268 sys->rx_pool_sz = in->desc_fifo_sz /
3269 sizeof(struct sps_iovec) - 1;
3270 if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
3271 sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
3272 sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
3273 sys->get_skb = ipa_get_skb_ipa_rx;
3274 sys->free_skb = ipa_free_skb_rx;
3275 sys->repl_hdlr = ipa_replenish_rx_cache;
3276 sys->ep->wakelock_client =
3277 IPA_WAKELOCK_REF_CLIENT_ODU_RX;
3278 } else if (in->client ==
3279 IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
3280 IPADBG("assigning policy to client:%d",
3281 in->client);
3282 sys->ep->status.status_en = false;
3283 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3284 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
3285 | SPS_O_ACK_TRANSFERS);
3286 sys->sps_callback = ipa_sps_irq_rx_notify;
3287 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3288 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3289 switch_to_intr_rx_work_func);
3290 } else if (in->client ==
3291 IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
3292 IPADBG("assigning policy to client:%d",
3293 in->client);
3294 sys->ep->status.status_en = false;
3295 sys->policy = IPA_POLICY_NOINTR_MODE;
3296 sys->sps_option = SPS_O_AUTO_ENABLE |
3297 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
3298 } else {
3299 IPAERR("Need to install a RX pipe hdlr\n");
3300 WARN_ON(1);
3301 return -EINVAL;
3302 }
3303 return 0;
3304}
3305
3306static int ipa_assign_policy(struct ipa_sys_connect_params *in,
3307 struct ipa_sys_context *sys)
3308{
3309 if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
3310 sys->policy = IPA_POLICY_INTR_MODE;
3311 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3312 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3313 return 0;
3314 }
3315
3316 if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
3317 if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) {
3318 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3319 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3320 SPS_O_ACK_TRANSFERS);
3321 sys->sps_callback = ipa_sps_irq_tx_notify;
3322 INIT_WORK(&sys->work, ipa_wq_handle_tx);
3323 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3324 switch_to_intr_tx_work_func);
3325 atomic_set(&sys->curr_polling_state, 0);
3326 } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
3327 sys->policy = IPA_POLICY_INTR_POLL_MODE;
3328 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
3329 SPS_O_ACK_TRANSFERS);
3330 sys->sps_callback = ipa_sps_irq_rx_notify;
3331 INIT_WORK(&sys->work, ipa_wq_handle_rx);
3332 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
3333 switch_to_intr_rx_work_func);
3334 INIT_DELAYED_WORK(&sys->replenish_rx_work,
3335 replenish_rx_work_func);
3336 atomic_set(&sys->curr_polling_state, 0);
3337 sys->rx_buff_sz = IPA_RX_SKB_SIZE;
3338 sys->rx_pool_sz = IPA_RX_POOL_CEIL;
3339 sys->pyld_hdlr = ipa_rx_pyld_hdlr;
3340 sys->get_skb = ipa_get_skb_ipa_rx;
3341 sys->free_skb = ipa_free_skb_rx;
3342 sys->repl_hdlr = ipa_replenish_rx_cache;
3343 } else if (IPA_CLIENT_IS_PROD(in->client)) {
3344 sys->policy = IPA_POLICY_INTR_MODE;
3345 sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
3346 sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
3347 } else {
3348 IPAERR("Need to install a RX pipe hdlr\n");
3349 WARN_ON(1);
3350 return -EINVAL;
3351 }
3352
3353 return 0;
3354 } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3355 return ipa_assign_policy_v2(in, sys);
3356
3357 IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
3358 WARN_ON(1);
3359 return -EINVAL;
3360}
3361
3362/**
3363 * ipa_tx_client_rx_notify_release() - Callback function
3364 * which updates the available fifo descriptor count and calls
3365 * the user supplied callback function to release the data
3366 * descriptor, if such a callback was supplied
3367 *
3368 * @user1: [in] - Data Descriptor
3369 * @user2: [in] - endpoint idx
3370 *
3371 * This notified callback is for the destination client
3372 * This function is supplied in ipa_tx_dp_mul
3373 */
3374static void ipa_tx_client_rx_notify_release(void *user1, int user2)
3375{
3376 struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
3377 int ep_idx = user2;
3378
3379 IPADBG("Received data desc anchor:%p\n", dd);
3380
3381 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3382 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3383
3384 /* wlan host driver waits till tx complete before unload */
3385 IPADBG("ep=%d fifo_desc_free_count=%d\n",
3386 ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc));
3387 IPADBG("calling client notify callback with priv:%p\n",
3388 ipa_ctx->ep[ep_idx].priv);
3389
3390 if (ipa_ctx->ep[ep_idx].client_notify) {
3391 ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
3392 IPA_WRITE_DONE, (unsigned long)user1);
3393 ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++;
3394 }
3395}
3396/**
3397 * ipa_tx_client_rx_pkt_status() - Callback function
3398 * which increases the count of available fifo descriptors for
3399 * the endpoint when a packet status is received
3400 *
3401 * @user1: [in] - Data Descriptor
3402 * @user2: [in] - endpoint idx
3403 *
3404 * This notified callback is for the destination client
3405 * This function is supplied in ipa_tx_dp_mul
3406 */
3407static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
3408{
3409 int ep_idx = user2;
3410
3411 atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
3412 ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
3413}
3414
3415
3416/**
3417 * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
3418 * @src: [in] - Client that is sending data
3419 * @ipa_tx_data_desc: [in] data descriptors from wlan
3420 *
3421 * this is used to transfer data descriptors that are received
3422 * from the WLAN1_PROD pipe to the IPA HW
3423 *
3424 * The function will send data descriptors from WLAN1_PROD (one
3425 * at a time) using sps_transfer_one. Will set EOT flag for last
3426 * descriptor Once this send was done from SPS point-of-view the
3427 * IPA driver will get notified by the supplied callback -
3428 * ipa_sps_irq_tx_no_aggr_notify()
3429 *
3430 * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied
3431 * callback (from ipa_connect)
3432 *
3433 * Returns: 0 on success, negative on failure
3434 */
3435int ipa2_tx_dp_mul(enum ipa_client_type src,
3436 struct ipa_tx_data_desc *data_desc)
3437{
3438 /* The second byte in wlan header holds qmap id */
3439#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
3440 struct ipa_tx_data_desc *entry;
3441 struct ipa_sys_context *sys;
3442 struct ipa_desc desc = { 0 };
3443 u32 num_desc, cnt;
3444 int ep_idx;
3445
3446 if (unlikely(!ipa_ctx)) {
3447 IPAERR("IPA driver was not initialized\n");
3448 return -EINVAL;
3449 }
3450
3451 IPADBG("Received data desc anchor:%p\n", data_desc);
3452
3453 spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3454
3455 ep_idx = ipa2_get_ep_mapping(src);
3456 if (unlikely(ep_idx == -1)) {
3457 IPAERR("dest EP does not exist.\n");
3458 goto fail_send;
3459 }
3460 IPADBG("ep idx:%d\n", ep_idx);
3461 sys = ipa_ctx->ep[ep_idx].sys;
3462
3463 if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
3464 IPAERR("dest EP not valid.\n");
3465 goto fail_send;
3466 }
3467 sys->ep->wstats.rx_hd_rcvd++;
3468
3469 /* Calculate the number of descriptors */
3470 num_desc = 0;
3471 list_for_each_entry(entry, &data_desc->link, link) {
3472 num_desc++;
3473 }
3474 IPADBG("Number of Data Descriptors:%d", num_desc);
3475
3476 if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
3477 IPAERR("Insufficient data descriptors available\n");
3478 goto fail_send;
3479 }
3480
3481 /* Assign callback only for last data descriptor */
3482 cnt = 0;
3483 list_for_each_entry(entry, &data_desc->link, link) {
3484 IPADBG("Parsing data desc :%d\n", cnt);
3485 cnt++;
3486 ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
3487 (u8)sys->ep->cfg.meta.qmap_id;
3488 desc.pyld = entry->pyld_buffer;
3489 desc.len = entry->pyld_len;
3490 desc.type = IPA_DATA_DESC_SKB;
3491 desc.user1 = data_desc;
3492 desc.user2 = ep_idx;
3493 IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
3494 entry->priv, desc.pyld, desc.len);
3495
3496 /* In case of last descriptor populate callback */
3497 if (cnt == num_desc) {
3498 IPADBG("data desc:%p\n", data_desc);
3499 desc.callback = ipa_tx_client_rx_notify_release;
3500 } else {
3501 desc.callback = ipa_tx_client_rx_pkt_status;
3502 }
3503
3504 IPADBG("calling ipa_send_one()\n");
3505 if (ipa_send_one(sys, &desc, true)) {
3506 IPAERR("fail to send skb\n");
3507 sys->ep->wstats.rx_pkt_leak += (cnt-1);
3508 sys->ep->wstats.rx_dp_fail++;
3509 goto fail_send;
3510 }
3511
3512 if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
3513 atomic_dec(&sys->ep->avail_fifo_desc);
3514
3515 sys->ep->wstats.rx_pkts_rcvd++;
3516 IPADBG("ep=%d fifo desc=%d\n",
3517 ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
3518 }
3519
3520 sys->ep->wstats.rx_hd_processed++;
3521 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3522 return 0;
3523
3524fail_send:
3525 spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
3526 return -EFAULT;
3527
3528}
3529
3530void ipa2_free_skb(struct ipa_rx_data *data)
3531{
3532 struct ipa_rx_pkt_wrapper *rx_pkt;
3533
3534 if (unlikely(!ipa_ctx)) {
3535 IPAERR("IPA driver was not initialized\n");
3536 return;
3537 }
3538
3539 spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3540
3541 ipa_ctx->wc_memb.total_tx_pkts_freed++;
3542 rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data);
3543
3544 ipa_skb_recycle(rx_pkt->data.skb);
3545 (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
3546
3547 list_add_tail(&rx_pkt->link,
3548 &ipa_ctx->wc_memb.wlan_comm_desc_list);
3549 ipa_ctx->wc_memb.wlan_comm_free_cnt++;
3550
3551 spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
3552}
3553
3554
3555/* Functions added to support kernel tests */
3556
3557int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
3558 unsigned long *ipa_bam_hdl,
3559 u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
3560{
3561 struct ipa_ep_context *ep;
3562 int ipa_ep_idx;
3563 int result = -EINVAL;
3564
3565 if (sys_in == NULL || clnt_hdl == NULL) {
3566 IPAERR("NULL args\n");
3567 goto fail_gen;
3568 }
3569
3570 if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) {
3571 IPAERR("NULL args\n");
3572 goto fail_gen;
3573 }
3574 if (sys_in->client >= IPA_CLIENT_MAX) {
3575 IPAERR("bad parm client:%d\n", sys_in->client);
3576 goto fail_gen;
3577 }
3578
3579 ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
3580 if (ipa_ep_idx == -1) {
3581 IPAERR("Invalid client :%d\n", sys_in->client);
3582 goto fail_gen;
3583 }
3584
3585 ep = &ipa_ctx->ep[ipa_ep_idx];
3586
3587 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
3588
3589 if (ep->valid == 1) {
3590 if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
3591 IPAERR("EP %d already allocated\n", ipa_ep_idx);
3592 goto fail_and_disable_clocks;
3593 } else {
3594 if (ipa2_cfg_ep_hdr(ipa_ep_idx,
3595 &sys_in->ipa_ep_cfg.hdr)) {
3596 IPAERR("fail to configure hdr prop of EP %d\n",
3597 ipa_ep_idx);
3598 result = -EFAULT;
3599 goto fail_and_disable_clocks;
3600 }
3601 if (ipa2_cfg_ep_cfg(ipa_ep_idx,
3602 &sys_in->ipa_ep_cfg.cfg)) {
3603 IPAERR("fail to configure cfg prop of EP %d\n",
3604 ipa_ep_idx);
3605 result = -EFAULT;
3606 goto fail_and_disable_clocks;
3607 }
3608 IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
3609 sys_in->client, ipa_ep_idx, ep->sys);
3610 ep->client_notify = sys_in->notify;
3611 ep->priv = sys_in->priv;
3612 *clnt_hdl = ipa_ep_idx;
3613 if (!ep->keep_ipa_awake)
3614 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3615
3616 return 0;
3617 }
3618 }
3619
3620 memset(ep, 0, offsetof(struct ipa_ep_context, sys));
3621
3622 ep->valid = 1;
3623 ep->client = sys_in->client;
3624 ep->client_notify = sys_in->notify;
3625 ep->priv = sys_in->priv;
3626 ep->keep_ipa_awake = true;
3627
3628 result = ipa_enable_data_path(ipa_ep_idx);
3629 if (result) {
3630 IPAERR("enable data path failed res=%d clnt=%d.\n",
3631 result, ipa_ep_idx);
3632 goto fail_gen2;
3633 }
3634
3635 if (!ep->skip_ep_cfg) {
3636 if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
3637 IPAERR("fail to configure EP.\n");
3638 goto fail_gen2;
3639 }
3640 if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
3641 IPAERR("fail to configure status of EP.\n");
3642 goto fail_gen2;
3643 }
3644 IPADBG("ep configuration successful\n");
3645 } else {
3646 IPADBG("skipping ep configuration\n");
3647 }
3648
3649 *clnt_hdl = ipa_ep_idx;
3650
3651 *ipa_pipe_num = ipa_ep_idx;
3652 *ipa_bam_hdl = ipa_ctx->bam_handle;
3653
3654 if (!ep->keep_ipa_awake)
3655 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3656
3657 ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
3658 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
3659 ipa_ep_idx, ep->sys);
3660
3661 return 0;
3662
3663fail_gen2:
3664fail_and_disable_clocks:
3665 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3666fail_gen:
3667 return result;
3668}
3669
3670int ipa2_sys_teardown(u32 clnt_hdl)
3671{
3672 struct ipa_ep_context *ep;
3673
3674 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3675 ipa_ctx->ep[clnt_hdl].valid == 0) {
3676 IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
3677 return -EINVAL;
3678 }
3679
3680 ep = &ipa_ctx->ep[clnt_hdl];
3681
3682 if (!ep->keep_ipa_awake)
3683 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3684
3685 ipa_disable_data_path(clnt_hdl);
3686 ep->valid = 0;
3687
3688 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3689
3690 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
3691
3692 return 0;
3693}
3694
3695int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
3696 unsigned long gsi_ev_hdl)
3697{
3698 IPAERR("GSI not supported in IPAv2");
3699 return -EFAULT;
3700}
3701
3702
3703/**
3704 * ipa_adjust_ra_buff_base_sz()
3705 *
3706 * Return value: the largest power of two that is smaller than the
3707 * input aggr_byte_limit plus IPA_MTU and IPA_GENERIC_RX_BUFF_LIMIT
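 *
 * Illustrative example (assuming the skb overhead folded into
 * IPA_GENERIC_RX_BUFF_LIMIT is roughly 0.4 KB, which is configuration
 * dependent): aggr_byte_limit = 16000 gives 16000 + 1500 + ~400, about
 * 17900, which rounds up to 32768, so 16384 is returned.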
3708 */
3709static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
3710{
3711 aggr_byte_limit += IPA_MTU;
3712 aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
3713 aggr_byte_limit--;
3714 aggr_byte_limit |= aggr_byte_limit >> 1;
3715 aggr_byte_limit |= aggr_byte_limit >> 2;
3716 aggr_byte_limit |= aggr_byte_limit >> 4;
3717 aggr_byte_limit |= aggr_byte_limit >> 8;
3718 aggr_byte_limit |= aggr_byte_limit >> 16;
3719 aggr_byte_limit++;
3720 return aggr_byte_limit >> 1;
3721}