/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
#include "ipa_i.h"
#include "ipa_trace.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
#define POLLING_MAX_SLEEP_RX 1050
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500
/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_MTU 1500
#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
#define IPA_GENERIC_AGGR_TIME_LIMIT 1
#define IPA_GENERIC_AGGR_PKT_LIMIT 0

#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
		(X) + NET_SKB_PAD) +\
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
#define IPA_GENERIC_RX_BUFF_LIMIT (\
		IPA_REAL_GENERIC_RX_BUFF_SZ(\
		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
		IPA_GENERIC_RX_BUFF_BASE_SZ)

/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
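
/*
 * Worked example for the sizing macros above (illustrative; it assumes
 * NET_SKB_PAD == 64 and sizeof(struct skb_shared_info) == 320, both of
 * which vary with kernel version, config and architecture):
 *
 *   IPA_REAL_GENERIC_RX_BUFF_SZ(8192)
 *	= SKB_DATA_ALIGN(8192 + 64) + SKB_DATA_ALIGN(320)
 *	= 8256 + 320 = 8576 bytes actually consumed per buffer
 *
 *   IPA_GENERIC_RX_BUFF_SZ(8192)
 *	= 8192 - (8576 - 8192) = 7808 bytes handed to the HW, so that
 *	payload plus skb overhead still fits the 8K base allocation
 *
 *   IPA_ADJUST_AGGR_BYTE_LIMIT(7808) = (7808 - 1500) / 1000 = 6 KB,
 *	which is where IPA_GENERIC_AGGR_BYTE_LIMIT comes from
 */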

#define IPA_RX_BUFF_CLIENT_HEADROOM 256

#define IPA_WLAN_RX_POOL_SZ 100
#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
#define IPA_WLAN_RX_BUFF_SZ 2048
#define IPA_WLAN_COMM_RX_POOL_LOW 100
#define IPA_WLAN_COMM_RX_POOL_HIGH 900

#define IPA_ODU_RX_BUFF_SZ 2048
#define IPA_ODU_RX_POOL_SZ 64
#define IPA_SIZE_DL_CSUM_META_TRAILER 8

#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */

#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
/* The below virtual channel cannot be used by any entity */
#define IPA_GSI_CH_20_WA_VIRT_CHAN 29

#define IPA_DEFAULT_SYS_YELLOW_WM 32
#define IPA_REPL_XFER_THRESH 10

#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_wq_handle_rx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
		u32 size);
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
		struct ipa3_sys_context *sys);
static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
static void ipa3_wq_rx_avail(struct work_struct *work);
static void ipa3_alloc_wlan_rx_common_cache(u32 size);
static void ipa3_cleanup_wlan_rx_common_cache(void);
static void ipa3_wq_repl_rx(struct work_struct *work);
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
		struct ipa_mem_buffer *mem_info);
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
		struct ipa3_ep_context *ep);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
		struct ipa3_tx_pkt_wrapper *tx_pkt,
		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
		struct ipa_mem_buffer *mem_info);
static unsigned long tag_to_pointer_wa(uint64_t tag);
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);

static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);

static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
				struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_tx_pkt_wrapper *next_pkt;
	int i, cnt;

	if (unlikely(tx_pkt == NULL)) {
		IPAERR("tx_pkt is NULL\n");
		return;
	}

	cnt = tx_pkt->cnt;
	IPADBG_LOW("cnt: %d\n", cnt);
	for (i = 0; i < cnt; i++) {
		spin_lock_bh(&sys->spinlock);
		if (unlikely(list_empty(&sys->head_desc_list))) {
			spin_unlock_bh(&sys->spinlock);
			return;
		}
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		sys->len--;
		spin_unlock_bh(&sys->spinlock);
		if (!tx_pkt->no_unmap_dma) {
			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					next_pkt->mem.phys_base,
					next_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		if (tx_pkt->callback)
			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);

		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
}

static void ipa3_wq_write_done_status(int src_pipe,
			struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_sys_context *sys;

	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);

	if (!ipa3_ctx->ep[src_pipe].status.status_en)
		return;

	sys = ipa3_ctx->ep[src_pipe].sys;
	if (!sys)
		return;

	ipa3_wq_write_done_common(sys, tx_pkt);
}

/**
 * ipa3_wq_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work: work_struct used by the work queue
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that
 *   the order for sent packets is the same as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 */
static void ipa3_wq_write_done(struct work_struct *work)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt;
	struct ipa3_sys_context *sys;
	struct ipa3_tx_pkt_wrapper *this_pkt;

	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
	sys = tx_pkt->sys;
	spin_lock_bh(&sys->spinlock);
	this_pkt = list_first_entry(&sys->head_desc_list,
		struct ipa3_tx_pkt_wrapper, link);
	while (tx_pkt != this_pkt) {
		spin_unlock_bh(&sys->spinlock);
		ipa3_wq_write_done_common(sys, this_pkt);
		spin_lock_bh(&sys->spinlock);
		this_pkt = list_first_entry(&sys->head_desc_list,
			struct ipa3_tx_pkt_wrapper, link);
	}
	spin_unlock_bh(&sys->spinlock);
	ipa3_wq_write_done_common(sys, tx_pkt);
}

static void ipa3_send_nop_desc(struct work_struct *work)
{
	struct ipa3_sys_context *sys = container_of(work,
		struct ipa3_sys_context, work);
	struct gsi_xfer_elem nop_xfer;
	struct ipa3_tx_pkt_wrapper *tx_pkt;

	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
	if (!tx_pkt) {
		IPAERR("failed to alloc tx wrapper\n");
		queue_work(sys->wq, &sys->work);
		return;
	}

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->cnt = 1;
	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
	tx_pkt->no_unmap_dma = true;
	tx_pkt->sys = sys;
	spin_lock_bh(&sys->spinlock);
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	spin_unlock_bh(&sys->spinlock);

	memset(&nop_xfer, 0, sizeof(nop_xfer));
	nop_xfer.type = GSI_XFER_ELEM_NOP;
	nop_xfer.flags = GSI_XFER_FLAG_EOT;
	nop_xfer.xfer_user_data = tx_pkt;
	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
			sys->ep->gsi_chan_hdl);
		queue_work(sys->wq, &sys->work);
		return;
	}
	sys->len_pending_xfer = 0;
}
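
/*
 * Condensed, illustrative sketch (not compiled in) of how TX completions
 * are batched on a channel using the common event ring: ipa3_send()
 * below queues data transfers with no EOT flag, so no completion event
 * is generated per packet; the NOP queued above carries EOT, and its
 * single completion event drains every older descriptor through
 * ipa3_wq_write_done().
 */
#if 0
	/* producer side, as in ipa3_send() with use_comm_evt_ring set: */
	gsi_queue_xfer(chan_hdl, num_desc, xfer, true);	/* no EOT flags */
	hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);

	/*
	 * ~2ms later: db_timer fires -> queue_work(sys->wq, &sys->work)
	 * -> ipa3_send_nop_desc() queues one GSI_XFER_ELEM_NOP with
	 * GSI_XFER_FLAG_EOT; its completion flushes all pending wrappers.
	 */
#endif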

/**
 * ipa3_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether caller is in atomic context
 *
 * This function is used for GPI connection.
 * - ipa3_tx_pkt_wrapper will be used for each ipa
 *   descriptor (allocated from wrappers cache)
 * - The wrapper struct will be configured for each ipa-desc payload and will
 *   contain information which will be later used by the user callbacks
 * - Each packet (command or data) that will be sent will also be saved in
 *   ipa3_sys_context for later check that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send(struct ipa3_sys_context *sys,
	u32 num_desc,
	struct ipa3_desc *desc,
	bool in_atomic)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
	struct ipa3_tx_pkt_wrapper *next_pkt;
	struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
	int i = 0;
	int j;
	int result;
	int fail_dma_wrap = 0;
	u32 mem_flag = GFP_ATOMIC;
	const struct ipa_gsi_ep_config *gsi_ep_cfg;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
	if (unlikely(!gsi_ep_cfg)) {
		IPAERR("failed to get gsi EP config for client=%d\n",
			sys->ep->client);
		return -EFAULT;
	}
	if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
		IPAERR("Too many chained descriptors need=%d max=%d\n",
			num_desc, gsi_ep_cfg->ipa_if_tlv);
		WARN_ON(1);
		return -EPERM;
	}

	gsi_xfer_elem_array =
		kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
		mem_flag);
	if (!gsi_xfer_elem_array) {
		IPAERR("Failed to alloc mem for gsi xfer array.\n");
		return -EFAULT;
	}

	spin_lock_bh(&sys->spinlock);

	for (i = 0; i < num_desc; i++) {
		fail_dma_wrap = 0;
		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
			mem_flag);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			goto failure;
		}

		INIT_LIST_HEAD(&tx_pkt->link);

		if (i == 0) {
			tx_pkt_first = tx_pkt;
			tx_pkt->cnt = num_desc;
			INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
		}

		/* populate tag field */
		if (desc[i].opcode ==
			ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
			if (ipa_populate_tag_field(&desc[i], tx_pkt,
				&tag_pyld_ret)) {
				IPAERR("Failed to populate tag field\n");
				goto failure;
			}
		}

		tx_pkt->type = desc[i].type;

		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
			tx_pkt->mem.base = desc[i].pyld;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					dma_map_single(ipa3_ctx->pdev,
					tx_pkt->mem.base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
				if (!tx_pkt->mem.phys_base) {
					IPAERR("failed to do dma map.\n");
					fail_dma_wrap = 1;
					goto failure;
				}
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		} else {
			tx_pkt->mem.base = desc[i].frag;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					skb_frag_dma_map(ipa3_ctx->pdev,
					desc[i].frag,
					0, tx_pkt->mem.size,
					DMA_TO_DEVICE);
				if (!tx_pkt->mem.phys_base) {
					IPAERR("dma map failed\n");
					fail_dma_wrap = 1;
					goto failure;
				}
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		}
		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;

		/*
		 * Special treatment for immediate commands, where
		 * the structure of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			gsi_xfer_elem_array[i].len = desc[i].opcode;
			gsi_xfer_elem_array[i].type =
				GSI_XFER_ELEM_IMME_CMD;
		} else {
			gsi_xfer_elem_array[i].len = desc[i].len;
			gsi_xfer_elem_array[i].type =
				GSI_XFER_ELEM_DATA;
		}

		if (i == (num_desc - 1)) {
			if (!sys->use_comm_evt_ring) {
				gsi_xfer_elem_array[i].flags |=
					GSI_XFER_FLAG_EOT;
				gsi_xfer_elem_array[i].flags |=
					GSI_XFER_FLAG_BEI;
			}
			gsi_xfer_elem_array[i].xfer_user_data =
				tx_pkt_first;
		} else {
			gsi_xfer_elem_array[i].flags |=
				GSI_XFER_FLAG_CHAIN;
		}
	}

	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
			gsi_xfer_elem_array, true);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI xfer failed.\n");
		goto failure;
	}

	kfree(gsi_xfer_elem_array);
	spin_unlock_bh(&sys->spinlock);

	/* set the timer for sending the NOP descriptor */
	if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);

		IPADBG_LOW("scheduling timer for ch %lu\n",
			sys->ep->gsi_chan_hdl);
		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
	}

	return 0;

failure:
	ipahal_destroy_imm_cmd(tag_pyld_ret);
	tx_pkt = tx_pkt_first;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
			dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
				tx_pkt->mem.size,
				DMA_TO_DEVICE);
		} else {
			dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
				tx_pkt->mem.size,
				DMA_TO_DEVICE);
		}
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
	if (j < num_desc)
		/* last desc failed */
		if (fail_dma_wrap)
			kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);

	kfree(gsi_xfer_elem_array);

	spin_unlock_bh(&sys->spinlock);
	return -EFAULT;
}
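
/*
 * Minimal usage sketch (hypothetical caller, not compiled in): sending a
 * single data buffer through ipa3_send(). Real callers such as
 * ipa3_tx_dp() below also prepend immediate commands and chain skb frags.
 */
#if 0
static int example_send_buf(struct ipa3_sys_context *sys, void *buf, u16 len)
{
	struct ipa3_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.pyld = buf;	/* virtual address; ipa3_send() DMA-maps it */
	desc.len = len;
	desc.type = IPA_DATA_DESC_SKB;
	desc.callback = NULL;	/* or a completion callback + user1/user2 */

	return ipa3_send(sys, 1, &desc, true);
}
#endif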

/**
 * ipa3_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether caller is in atomic context
 *
 * - Allocate tx_packet wrapper
 * - transfer data to the IPA
 * - after the transfer is done the transport driver will
 *   notify the sender via the supplied callback
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
	bool in_atomic)
{
	return ipa3_send(sys, 1, desc, in_atomic);
}

/**
 * ipa3_transport_irq_cmd_ack - callback function which will be called by
 * the transport driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: not used
 *
 * Complete the immediate commands completion object, this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
{
	struct ipa3_desc *desc = (struct ipa3_desc *)user1;

	if (!desc) {
		IPAERR("desc is NULL\n");
		WARN_ON(1);
		return;
	}
	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}

/**
 * ipa3_transport_irq_cmd_ack_free - callback function which will be
 * called by the transport driver after an immediate command is complete.
 * This function will also free the completion object once it is done.
 * @tag_comp: pointer to the completion object
 * @ignored: parameter not used
 *
 * Complete the immediate commands completion object, this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
{
	struct ipa3_tag_completion *comp = tag_comp;

	if (!comp) {
		IPAERR("comp is NULL\n");
		return;
	}

	complete(&comp->comp);
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);
}

/**
 * ipa3_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 *
 * Function will block till command gets ACK from IPA HW; caller needs
 * to free any resources it allocated after the function returns.
 * The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	sys = ipa3_ctx->ep[ep_idx].sys;
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
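
/*
 * Usage sketch (hypothetical, not compiled in): issuing one immediate
 * command and blocking until the HW ACKs it. The caller builds and later
 * frees cmd_pyld; ipa3_send_cmd() installs the completion callback itself.
 */
#if 0
static int example_send_imm_cmd(struct ipahal_imm_cmd_pyld *cmd_pyld)
{
	struct ipa3_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = cmd_pyld->opcode;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.type = IPA_IMM_CMD_DESC;

	return ipa3_send_cmd(1, &desc);	/* blocks on desc.xfer_done */
}
#endif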

/**
 * ipa3_send_cmd_timeout - send immediate commands with limited time
 *	waiting for ACK from IPA HW
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 * @timeout: milliseconds to wait till get ACK from IPA HW
 *
 * Function will block till command gets ACK from IPA HW or timeout.
 * Caller needs to free any resources it allocated after the function returns.
 * The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;
	int completed;
	struct ipa3_tag_completion *comp;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
	if (!comp) {
		IPAERR("no mem\n");
		return -ENOMEM;
	}
	init_completion(&comp->comp);

	/* completion needs to be released from both here and in ack callback */
	atomic_set(&comp->cnt, 2);

	sys = ipa3_ctx->ep[ep_idx].sys;
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack_free;
		descr->user1 = comp;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	} else {
		desc = &descr[num_desc - 1];

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack_free;
		desc->user1 = comp;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	}

	completed = wait_for_completion_timeout(
		&comp->comp, msecs_to_jiffies(timeout));
	if (!completed)
		IPADBG("timeout waiting for imm-cmd ACK\n");

	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
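
/*
 * The two-owner refcount above is the part worth copying: the waiter and
 * the ACK callback each do atomic_dec_return(), and whichever runs last
 * frees the object, so a late HW ACK arriving after a timeout cannot
 * touch freed memory. Condensed sketch of the pattern (illustrative,
 * not compiled in):
 */
#if 0
	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	init_completion(&comp->comp);
	atomic_set(&comp->cnt, 2);		/* waiter + ack callback */

	/* ...hand comp to the callback and send the command... */

	wait_for_completion_timeout(&comp->comp, msecs_to_jiffies(timeout));
	if (atomic_dec_return(&comp->cnt) == 0)	/* callback already ran */
		kfree(comp);
#endif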

/**
 * ipa3_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, make it non DMAable
 * - Free the packet from the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
		bool in_poll_state)
{
	int ret;
	int cnt = 0;
	struct ipa_mem_buffer mem_info = { 0 };

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
			!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = ipa_poll_gsi_pkt(sys, &mem_info);
		if (ret)
			break;

		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
			ipa3_dma_memcpy_notify(sys, &mem_info);
		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
			ipa3_wlan_wq_rx_common(sys, mem_info.size);
		else
			ipa3_wq_rx_common(sys, mem_info.size);

		++cnt;
	}
	return cnt;
}

/**
 * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
{
	int ret;

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}
	atomic_set(&sys->curr_polling_state, 0);
	ipa3_dec_release_wakelock();
	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
		GSI_CHAN_MODE_CALLBACK);
	if (ret != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to switch to intr mode.\n");
		goto fail;
	}
	return;

fail:
	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
			msecs_to_jiffies(1));
}

/**
 * ipa3_handle_rx() - handle packet reception. This function is executed in the
 * context of a work queue.
 * @sys: system context of the pipe
 *
 * ipa3_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa3_handle_rx(struct ipa3_sys_context *sys)
{
	int inactive_cycles = 0;
	int cnt;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	do {
		cnt = ipa3_handle_rx_core(sys, true, true);
		if (cnt == 0)
			inactive_cycles++;
		else
			inactive_cycles = 0;

		trace_idle_sleep_enter3(sys->ep->client);
		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
		trace_idle_sleep_exit3(sys->ep->client);
	} while (inactive_cycles <= POLLING_INACTIVITY_RX);

	trace_poll_to_intr3(sys->ep->client);
	ipa3_rx_switch_to_intr_mode(sys);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}

static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa3_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);

	if (sys->ep->napi_enabled) {
		if (sys->ep->switch_to_intr) {
			ipa3_rx_switch_to_intr_mode(sys);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
			sys->ep->switch_to_intr = false;
			sys->ep->inactive_cycles = 0;
		} else
			sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
	} else
		ipa3_handle_rx(sys);
}

enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
{
	struct ipa3_sys_context *sys = container_of(param,
		struct ipa3_sys_context, db_timer);

	queue_work(sys->wq, &sys->work);
	return HRTIMER_NORESTART;
}

/**
 * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
 * IPA EP configuration
 * @sys_in: [in] input needed to setup the pipe and configure EP
 * @clnt_hdl: [out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - Creates a GPI connection with IPA.
 * - allocate descriptor FIFO
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int ipa_ep_idx;
	int result = -EINVAL;
	char buff[IPA_RESOURCE_NAME_MAX];

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm client:%d fifo_sz:%d\n",
			sys_in->client, sys_in->desc_fifo_sz);
		goto fail_gen;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client.\n");
		goto fail_gen;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		goto fail_gen;
	}

	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	if (!ep->sys) {
		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
		if (!ep->sys) {
			IPAERR("failed to alloc sys ctx for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_and_disable_clocks;
		}

		ep->sys->ep = ep;
		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
			sys_in->client);
		ep->sys->wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
		if (!ep->sys->wq) {
			IPAERR("failed to create wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq;
		}

		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
			sys_in->client);
		ep->sys->repl_wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
		if (!ep->sys->repl_wq) {
			IPAERR("failed to create rep wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq2;
		}

		INIT_LIST_HEAD(&ep->sys->head_desc_list);
		INIT_LIST_HEAD(&ep->sys->rcycl_list);
		spin_lock_init(&ep->sys->spinlock);
		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
	} else {
		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
	}

	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa3_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to assign policy for client %d\n",
			sys_in->client);
		result = -ENOMEM;
		goto fail_gen2;
	}

	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->napi_enabled = sys_in->napi_enabled;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
	atomic_set(&ep->avail_fifo_desc,
		((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));

	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->sys->status_stat == NULL) {
		ep->sys->status_stat =
			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
		if (!ep->sys->status_stat) {
			IPAERR("no memory\n");
			goto fail_gen2;
		}
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep %d configuration successful\n", ipa_ep_idx);
	} else {
		IPADBG("skipping ep %d configuration\n", ipa_ep_idx);
	}

	result = ipa_gsi_setup_channel(sys_in, ep);
	if (result) {
		IPAERR("Failed to setup GSI channel\n");
		goto fail_gen2;
	}

	*clnt_hdl = ipa_ep_idx;

	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
		ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
			sizeof(void *), GFP_KERNEL);
		if (!ep->sys->repl.cache) {
			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
			ep->sys->repl.capacity = 0;
		} else {
			atomic_set(&ep->sys->repl.head_idx, 0);
			atomic_set(&ep->sys->repl.tail_idx, 0);
			ipa3_wq_repl_rx(&ep->sys->repl_work);
		}
	}

	if (IPA_CLIENT_IS_CONS(sys_in->client))
		ipa3_replenish_rx_cache(ep->sys);

	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
	}

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			sys_in->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_install_dflt_flt_rules(ipa_ep_idx);
	}

	result = ipa3_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d ep=%d.\n", result,
			ipa_ep_idx);
		goto fail_gen2;
	}

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
		ipa_ep_idx, ep->sys);

	return 0;

fail_gen2:
	destroy_workqueue(ep->sys->repl_wq);
fail_wq2:
	destroy_workqueue(ep->sys->wq);
fail_wq:
	kfree(ep->sys);
	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
fail_and_disable_clocks:
	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
	return result;
}
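
/*
 * Usage sketch (hypothetical client, not compiled in): opening an APPS
 * LAN consumer pipe and tearing it down again. Field values are examples
 * only; see struct ipa_sys_connect_params for the full set, and
 * my_rx_notify_cb is an assumed client callback.
 */
#if 0
static int example_open_lan_cons(void)
{
	struct ipa_sys_connect_params sys_in;
	u32 clnt_hdl;
	int ret;

	memset(&sys_in, 0, sizeof(sys_in));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = 0x800;		/* example FIFO size */
	sys_in.notify = my_rx_notify_cb;
	sys_in.priv = NULL;

	ret = ipa3_setup_sys_pipe(&sys_in, &clnt_hdl);
	if (ret)
		return ret;

	/* ... pipe is live; RX buffers are replenished automatically ... */

	return ipa3_teardown_sys_pipe(clnt_hdl);
}
#endif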

/**
 * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
 * @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_teardown_sys_pipe(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int empty;
	int result;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);
	if (ep->napi_enabled) {
		ep->switch_to_intr = true;
		do {
			usleep_range(95, 105);
		} while (atomic_read(&ep->sys->curr_polling_state));
	}

	if (IPA_CLIENT_IS_PROD(ep->client)) {
		do {
			spin_lock_bh(&ep->sys->spinlock);
			empty = list_empty(&ep->sys->head_desc_list);
			spin_unlock_bh(&ep->sys->spinlock);
			if (!empty)
				usleep_range(95, 105);
			else
				break;
		} while (1);
	}

	if (IPA_CLIENT_IS_CONS(ep->client))
		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
	flush_workqueue(ep->sys->wq);
	result = ipa3_stop_gsi_channel(clnt_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI stop chan err: %d.\n", result);
		ipa_assert();
		return result;
	}
	result = gsi_reset_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to reset chan: %d.\n", result);
		ipa_assert();
		return result;
	}
	dma_free_coherent(ipa3_ctx->pdev,
		ep->gsi_mem_info.chan_ring_len,
		ep->gsi_mem_info.chan_ring_base_vaddr,
		ep->gsi_mem_info.chan_ring_base_addr);
	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc chan: %d.\n", result);
		ipa_assert();
		return result;
	}

	/* free event ring only when it is present */
	if (ep->sys->use_comm_evt_ring) {
		ipa3_ctx->gsi_evt_comm_ring_rem +=
			ep->gsi_mem_info.chan_ring_len;
	} else if (ep->gsi_evt_ring_hdl != ~0) {
		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("Failed to reset evt ring: %d.\n",
				result);
			BUG();
			return result;
		}
		dma_free_coherent(ipa3_ctx->pdev,
			ep->gsi_mem_info.evt_ring_len,
			ep->gsi_mem_info.evt_ring_base_vaddr,
			ep->gsi_mem_info.evt_ring_base_addr);
		result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("Failed to dealloc evt ring: %d.\n",
				result);
			BUG();
			return result;
		}
	}
	if (ep->sys->repl_wq)
		flush_workqueue(ep->sys->repl_wq);
	if (IPA_CLIENT_IS_CONS(ep->client))
		ipa3_cleanup_rx(ep->sys);

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			ep->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_delete_dflt_flt_rules(clnt_hdl);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);

	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));

	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
		ipa3_cleanup_wlan_rx_common_cache();

	ep->valid = 0;
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}

/**
 * ipa3_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: the skb that was transmitted
 * @user2: the destination endpoint index
 *
 * This notify callback is for the destination client.
 */
static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
{
	struct sk_buff *skb = (struct sk_buff *)user1;
	int ep_idx = user2;

	IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx);

	IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);

	if (ipa3_ctx->ep[ep_idx].client_notify)
		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
			IPA_WRITE_DONE, (unsigned long)skb);
	else
		dev_kfree_skb_any(skb);
}

void ipa3_tx_cmd_comp(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}

/**
 * ipa3_tx_dp() - Data-path tx handler
 * @dst: [in] which IPA destination to route tx packets to
 * @skb: [in] the packet to send
 * @meta: [in] TX packet meta-data
 *
 * Data-path tx handler. This is used for both the SW data-path, which
 * by-passes most IPA HW blocks, AND the regular HW data-path for WLAN AMPDU
 * traffic only. If dst is a "valid" CONS type, then the SW data-path is used.
 * If dst is the WLAN_AMPDU PROD type, then the HW data-path for WLAN AMPDU is
 * used. Anything else is an error. For errors, the client needs to free the
 * skb as needed. For success, the IPA driver will later invoke the client
 * callback if one was supplied; that callback should free the skb. If no
 * callback was supplied, the IPA driver will free the skb internally.
 *
 * The function will use two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
 * the first descriptor will be used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once this send was done from transport point-of-view the IPA driver will
 * get notified by the supplied callback.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		struct ipa_tx_meta *meta)
{
	struct ipa3_desc *desc;
	struct ipa3_desc _desc[3];
	int dst_ep_idx;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipa3_sys_context *sys;
	int src_ep_idx;
	int num_frags, f;
	const struct ipa_gsi_ep_config *gsi_ep;
	int data_idx;

	if (unlikely(!ipa3_ctx)) {
		IPAERR("IPA3 driver was not initialized\n");
		return -EINVAL;
	}

	if (skb->len == 0) {
		IPAERR("packet size is 0\n");
		return -EINVAL;
	}

	/*
	 * USB_CONS: PKT_INIT ep_idx = dst pipe
	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
	 *
	 * LAN TX: all PKT_INIT
	 * WAN TX: PKT_INIT (cmd) + HW (data)
	 */
	if (IPA_CLIENT_IS_CONS(dst)) {
		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n",
				IPA_CLIENT_APPS_LAN_PROD);
			goto fail_gen;
		}
		dst_ep_idx = ipa3_get_ep_mapping(dst);
	} else {
		src_ep_idx = ipa3_get_ep_mapping(dst);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n", dst);
			goto fail_gen;
		}
		if (meta && meta->pkt_init_dst_ep_valid)
			dst_ep_idx = meta->pkt_init_dst_ep;
		else
			dst_ep_idx = -1;
	}

	sys = ipa3_ctx->ep[src_ep_idx].sys;

	if (!sys->ep->valid) {
		IPAERR("pipe not valid\n");
		goto fail_gen;
	}

	num_frags = skb_shinfo(skb)->nr_frags;
	/*
	 * make sure TLV FIFO supports the needed frags.
	 * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
	 * 1 descriptor needed for the linear portion of skb.
	 */
	gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
	if (gsi_ep && (num_frags + 3 > gsi_ep->ipa_if_tlv)) {
		if (skb_linearize(skb)) {
			IPAERR("Failed to linear skb with %d frags\n",
				num_frags);
			goto fail_gen;
		}
		num_frags = 0;
	}
	if (num_frags) {
		/* 1 desc for tag to resolve status out-of-order issue;
		 * 1 desc is needed for the linear portion of skb;
		 * 1 desc may be needed for the PACKET_INIT;
		 * 1 desc for each frag
		 */
		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
		if (!desc) {
			IPAERR("failed to alloc desc array\n");
			goto fail_gen;
		}
	} else {
		memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
		desc = &_desc[0];
	}

	if (dst_ep_idx != -1) {
		/* SW data path */
		data_idx = 0;
		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
			/*
			 * For non-interrupt mode channel (where there is no
			 * event ring) TAG STATUS are used for completion
			 * notification. IPA will generate a status packet with
			 * tag info as a result of the TAG STATUS command.
			 */
			desc[data_idx].opcode =
				ipahal_imm_cmd_get_opcode(
					IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
			desc[data_idx].type = IPA_IMM_CMD_DESC;
			desc[data_idx].callback = ipa3_tag_destroy_imm;
			data_idx++;
		}
		desc[data_idx].opcode =
			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
		desc[data_idx].dma_address_valid = true;
		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
		desc[data_idx].type = IPA_IMM_CMD_DESC;
		desc[data_idx].callback = NULL;
		data_idx++;
		desc[data_idx].pyld = skb->data;
		desc[data_idx].len = skb_headlen(skb);
		desc[data_idx].type = IPA_DATA_DESC_SKB;
		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
		desc[data_idx].user1 = skb;
		desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
				meta->pkt_init_dst_ep_remote) ?
				src_ep_idx :
				dst_ep_idx;
		if (meta && meta->dma_address_valid) {
			desc[data_idx].dma_address_valid = true;
			desc[data_idx].dma_address = meta->dma_address;
		}
		data_idx++;

		for (f = 0; f < num_frags; f++) {
			desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
			desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
			desc[data_idx + f].len =
				skb_frag_size(desc[data_idx + f].frag);
		}
		/* don't free skb till frag mappings are released */
		if (num_frags) {
			desc[data_idx + f - 1].callback = desc[2].callback;
			desc[data_idx + f - 1].user1 = desc[2].user1;
			desc[data_idx + f - 1].user2 = desc[2].user2;
			desc[data_idx - 1].callback = NULL;
		}

		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
			IPAERR("fail to send skb %p num_frags %u SWP\n",
				skb, num_frags);
			goto fail_send;
		}
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
	} else {
		/* HW data path */
		data_idx = 0;
		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
			/*
			 * For non-interrupt mode channel (where there is no
			 * event ring) TAG STATUS are used for completion
			 * notification. IPA will generate a status packet with
			 * tag info as a result of the TAG STATUS command.
			 */
			desc[data_idx].opcode =
				ipahal_imm_cmd_get_opcode(
					IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
			desc[data_idx].type = IPA_IMM_CMD_DESC;
			desc[data_idx].callback = ipa3_tag_destroy_imm;
			data_idx++;
		}
		desc[data_idx].pyld = skb->data;
		desc[data_idx].len = skb_headlen(skb);
		desc[data_idx].type = IPA_DATA_DESC_SKB;
		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
		desc[data_idx].user1 = skb;
		desc[data_idx].user2 = src_ep_idx;

		if (meta && meta->dma_address_valid) {
			desc[data_idx].dma_address_valid = true;
			desc[data_idx].dma_address = meta->dma_address;
		}
		if (num_frags == 0) {
			if (ipa3_send(sys, data_idx + 1, desc, true)) {
				IPAERR("fail to send skb %p HWP\n", skb);
				goto fail_mem;
			}
		} else {
			for (f = 0; f < num_frags; f++) {
				desc[data_idx+f+1].frag =
					&skb_shinfo(skb)->frags[f];
				desc[data_idx+f+1].type =
					IPA_DATA_DESC_SKB_PAGED;
				desc[data_idx+f+1].len =
					skb_frag_size(desc[data_idx+f+1].frag);
			}
			/* don't free skb till frag mappings are released */
			desc[data_idx+f].callback = desc[data_idx].callback;
			desc[data_idx+f].user1 = desc[data_idx].user1;
			desc[data_idx+f].user2 = desc[data_idx].user2;
			desc[data_idx].callback = NULL;

			if (ipa3_send(sys, num_frags + data_idx + 1,
				desc, true)) {
				IPAERR("fail to send skb %p num_frags %u HWP\n",
					skb, num_frags);
				goto fail_mem;
			}
		}
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
	}

	if (num_frags) {
		kfree(desc);
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
	}
	return 0;

fail_send:
	ipahal_destroy_imm_cmd(cmd_pyld);
fail_mem:
	if (num_frags)
		kfree(desc);
fail_gen:
	return -EFAULT;
}
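
/*
 * Usage sketch (hypothetical netdev xmit path, not compiled in): pushing
 * an skb into the SW data path. On success the driver (or the registered
 * client_notify callback) frees the skb; on error ownership stays with
 * the caller, which must free it.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb)
{
	if (ipa3_tx_dp(IPA_CLIENT_APPS_LAN_CONS, skb, NULL)) {
		dev_kfree_skb_any(skb);	/* error path: caller frees */
		return NETDEV_TX_OK;
	}
	/* success: completion callback will free the skb */
	return NETDEV_TX_OK;
}
#endif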

static void ipa3_wq_handle_rx(struct work_struct *work)
{
	struct ipa3_sys_context *sys;

	sys = container_of(work, struct ipa3_sys_context, work);

	if (sys->ep->napi_enabled) {
		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
		sys->ep->client_notify(sys->ep->priv,
			IPA_CLIENT_START_POLL, 0);
	} else
		ipa3_handle_rx(sys);
}

static void ipa3_wq_repl_rx(struct work_struct *work)
{
	struct ipa3_sys_context *sys;
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	gfp_t flag = GFP_KERNEL;
	u32 next;
	u32 curr;

	sys = container_of(work, struct ipa3_sys_context, repl_work);
	curr = atomic_read(&sys->repl.tail_idx);

begin:
	while (1) {
		next = (curr + 1) % sys->repl.capacity;
		if (next == atomic_read(&sys->repl.head_idx))
			goto fail_kmem_cache_alloc;

		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
			flag);
		if (!rx_pkt) {
			pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
				__func__, sys);
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);
		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL) {
			pr_err_ratelimited("%s fail alloc skb sys=%p\n",
				__func__, sys);
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
			sys->rx_buff_sz,
			DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
		    rx_pkt->data.dma_addr == ~0) {
			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
				__func__, (void *)rx_pkt->data.dma_addr,
				ptr, sys);
			goto fail_dma_mapping;
		}

		sys->repl.cache[curr] = rx_pkt;
		curr = next;
		/* ensure write is done before setting tail index */
		mb();
		atomic_set(&sys->repl.tail_idx, next);
	}

	return;

fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (atomic_read(&sys->repl.tail_idx) ==
	    atomic_read(&sys->repl.head_idx)) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
		else
			WARN_ON(1);
		pr_err_ratelimited("%s sys=%p repl ring empty\n",
			__func__, sys);
		goto begin;
	}
}
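
/*
 * The repl ring filled above is a single-producer/single-consumer
 * circular buffer: this worker is the only writer of tail_idx, the RX
 * path is the only writer of head_idx, and one slot is sacrificed so
 * that head == tail means empty while (tail + 1) % capacity == head
 * means full. Condensed sketch of the invariant (illustrative, not
 * compiled in):
 */
#if 0
	/* producer (this worker): stop when the next slot would hit head */
	next = (curr + 1) % sys->repl.capacity;
	if (next == atomic_read(&sys->repl.head_idx))
		return;		/* ring full */
	sys->repl.cache[curr] = rx_pkt;
	atomic_set(&sys->repl.tail_idx, next);

	/* consumer (replenish path): empty when head catches tail */
	head = atomic_read(&sys->repl.head_idx);
	if (head == atomic_read(&sys->repl.tail_idx))
		return;		/* ring empty */
	rx_pkt = sys->repl.cache[head];
	atomic_set(&sys->repl.head_idx, (head + 1) % sys->repl.capacity);
#endif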

static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
	struct ipa3_rx_pkt_wrapper *tmp;
	int ret;
	struct gsi_xfer_elem gsi_xfer_elem_one;
	u32 rx_len_cached = 0;

	IPADBG_LOW("\n");

	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
	rx_len_cached = sys->len;

	if (rx_len_cached < sys->rx_pool_sz) {
		list_for_each_entry_safe(rx_pkt, tmp,
			&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
			list_del(&rx_pkt->link);

			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;

			INIT_LIST_HEAD(&rx_pkt->link);
			rx_pkt->len = 0;
			rx_pkt->sys = sys;

			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
			memset(&gsi_xfer_elem_one, 0,
				sizeof(gsi_xfer_elem_one));
			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
			gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
			gsi_xfer_elem_one.xfer_user_data = rx_pkt;

			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
				&gsi_xfer_elem_one, true);

			if (ret) {
				IPAERR("failed to provide buffer: %d\n", ret);
				goto fail_provide_rx_buffer;
			}

			rx_len_cached = ++sys->len;

			if (rx_len_cached >= sys->rx_pool_sz) {
				spin_unlock_bh(
					&ipa3_ctx->wc_memb.wlan_spinlock);
				return;
			}
		}
	}
	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);

	if (rx_len_cached < sys->rx_pool_sz &&
			ipa3_ctx->wc_memb.wlan_comm_total_cnt <
			IPA_WLAN_COMM_RX_POOL_HIGH) {
		ipa3_replenish_rx_cache(sys);
		ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
			(sys->rx_pool_sz - rx_len_cached);
	}

	return;

fail_provide_rx_buffer:
	list_del(&rx_pkt->link);
	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}
1558
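/*
 * ipa3_cleanup_wlan_rx_common_cache() - Free every buffer still held on
 * the WLAN common RX list: unmap its DMA address, free the skb and the
 * wrapper, and drop the free/total accounting. Non-zero counters at the
 * end indicate leaked descriptors and are reported.
 */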
static void ipa3_cleanup_wlan_rx_common_cache(void)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	struct ipa3_rx_pkt_wrapper *tmp;

	list_for_each_entry_safe(rx_pkt, tmp,
		&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
		list_del(&rx_pkt->link);
		/* unmap with the size the buffer was actually mapped with */
		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_pkt->data.skb);
		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
		ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
		ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
	}
	ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;

	if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
		IPAERR("wlan comm buff free cnt: %d\n",
			ipa3_ctx->wc_memb.wlan_comm_free_cnt);

	if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
		IPAERR("wlan comm buff total cnt: %d\n",
			ipa3_ctx->wc_memb.wlan_comm_total_cnt);
}

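/*
 * ipa3_alloc_wlan_rx_common_cache() - Grow the WLAN common RX pool up to
 * @size buffers. Each entry gets a wrapper, an skb of IPA_WLAN_RX_BUFF_SZ
 * bytes and a streaming DMA mapping; an allocation failure stops the loop
 * quietly, leaving the pool partially filled.
 */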
static void ipa3_alloc_wlan_rx_common_cache(u32 size)
{
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int rx_len_cached = 0;
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
	while (rx_len_cached < size) {
		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
			flag);
		if (!rx_pkt) {
			IPAERR("failed to alloc rx wrapper\n");
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);
		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);

		rx_pkt->data.skb =
			ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ, flag);
		if (rx_pkt->data.skb == NULL) {
			IPAERR("failed to alloc skb\n");
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
			rx_pkt->data.dma_addr == ~0) {
			IPAERR("dma_map_single failure %p for %p\n",
				(void *)rx_pkt->data.dma_addr, ptr);
			goto fail_dma_mapping;
		}

		list_add_tail(&rx_pkt->link,
			&ipa3_ctx->wc_memb.wlan_comm_desc_list);
		rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;

		ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
	}

	return;

fail_dma_mapping:
	dev_kfree_skb_any(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	return;
}

/**
 * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
 *
 * The function allocates buffers from the rx_pkt_wrapper_cache until the
 * pipe's pool holds rx_pool_sz buffers:
 * - Allocate a wrapper from the cache
 * - Initialize the packet's link
 * - Initialize the packet's work struct
 * - Allocate the packet's socket buffer (skb)
 * - Extend the skb over the whole buffer (skb_put)
 * - Make the buffer DMAable
 * - Add the packet to the system pipe linked list
 * - Queue the buffer on the GSI channel
 */
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
{
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_one;
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = sys->len;

	while (rx_len_cached < sys->rx_pool_sz) {
		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
			flag);
		if (!rx_pkt) {
			IPAERR("failed to alloc rx wrapper\n");
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);
		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL) {
			IPAERR("failed to alloc skb\n");
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
						       sys->rx_buff_sz,
						       DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
			rx_pkt->data.dma_addr == ~0) {
			IPAERR("dma_map_single failure %p for %p\n",
				(void *)rx_pkt->data.dma_addr, ptr);
			goto fail_dma_mapping;
		}

		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
		rx_len_cached = ++sys->len;

		memset(&gsi_xfer_elem_one, 0,
			sizeof(gsi_xfer_elem_one));
		gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
		gsi_xfer_elem_one.len = sys->rx_buff_sz;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_one.xfer_user_data = rx_pkt;

		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
			1, &gsi_xfer_elem_one, false);
		if (ret != GSI_STATUS_SUCCESS) {
			IPAERR("failed to provide buffer: %d\n",
				ret);
			goto fail_provide_rx_buffer;
		}

		/*
		 * As ringing the doorbell is a costly operation, notify
		 * GSI of new buffers only once the threshold is exceeded
		 */
		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
			sys->len_pending_xfer = 0;
			gsi_start_xfer(sys->ep->gsi_chan_hdl);
		}
	}

	return;

fail_provide_rx_buffer:
	list_del(&rx_pkt->link);
	rx_len_cached = --sys->len;
	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
		sys->rx_buff_sz, DMA_FROM_DEVICE);
fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (rx_len_cached - sys->len_pending_xfer == 0)
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
}

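/*
 * ipa3_replenish_rx_cache_recycle() - Like ipa3_replenish_rx_cache(), but
 * prefer recycled wrappers from sys->rcycl_list (whose skbs are reused as
 * is) and allocate fresh skbs only when the recycle list is empty.
 */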
static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
{
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_one;
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = sys->len;

	while (rx_len_cached < sys->rx_pool_sz) {
		if (list_empty(&sys->rcycl_list)) {
			rx_pkt = kmem_cache_zalloc(
				ipa3_ctx->rx_pkt_wrapper_cache, flag);
			if (!rx_pkt) {
				IPAERR("failed to alloc rx wrapper\n");
				goto fail_kmem_cache_alloc;
			}

			INIT_LIST_HEAD(&rx_pkt->link);
			INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
			rx_pkt->sys = sys;

			rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
			if (rx_pkt->data.skb == NULL) {
				IPAERR("failed to alloc skb\n");
				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
					rx_pkt);
				goto fail_kmem_cache_alloc;
			}
			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
			if (rx_pkt->data.dma_addr == 0 ||
				rx_pkt->data.dma_addr == ~0) {
				IPAERR("dma_map_single failure %p for %p\n",
					(void *)rx_pkt->data.dma_addr, ptr);
				goto fail_dma_mapping;
			}
		} else {
			spin_lock_bh(&sys->spinlock);
			rx_pkt = list_first_entry(&sys->rcycl_list,
				struct ipa3_rx_pkt_wrapper, link);
			list_del(&rx_pkt->link);
			spin_unlock_bh(&sys->spinlock);
			INIT_LIST_HEAD(&rx_pkt->link);
			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
			if (rx_pkt->data.dma_addr == 0 ||
				rx_pkt->data.dma_addr == ~0) {
				IPAERR("dma_map_single failure %p for %p\n",
					(void *)rx_pkt->data.dma_addr, ptr);
				goto fail_dma_mapping;
			}
		}

		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
		rx_len_cached = ++sys->len;
		memset(&gsi_xfer_elem_one, 0,
			sizeof(gsi_xfer_elem_one));
		gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
		gsi_xfer_elem_one.len = sys->rx_buff_sz;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_one.xfer_user_data = rx_pkt;

		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
			1, &gsi_xfer_elem_one, false);
		if (ret != GSI_STATUS_SUCCESS) {
			IPAERR("failed to provide buffer: %d\n",
				ret);
			goto fail_provide_rx_buffer;
		}

		/*
		 * As ringing the doorbell is a costly operation, notify
		 * GSI of new buffers only once the threshold is exceeded
		 */
		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
			sys->len_pending_xfer = 0;
			gsi_start_xfer(sys->ep->gsi_chan_hdl);
		}
	}

	return;
fail_provide_rx_buffer:
	rx_len_cached = --sys->len;
	list_del(&rx_pkt->link);
	INIT_LIST_HEAD(&rx_pkt->link);
	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
		sys->rx_buff_sz, DMA_FROM_DEVICE);
fail_dma_mapping:
	spin_lock_bh(&sys->spinlock);
	/* keep the link valid while the wrapper sits on the recycle list */
	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
	spin_unlock_bh(&sys->spinlock);
fail_kmem_cache_alloc:
	if (rx_len_cached - sys->len_pending_xfer == 0)
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
}

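/*
 * ipa3_fast_replenish_rx_cache() - Consume pre-allocated wrappers from the
 * sys->repl ring and hand them to GSI. The ring is single-producer
 * (ipa3_wq_repl_rx advances tail_idx) / single-consumer (this function
 * advances head_idx); on each side an mb() orders the cache[] slot access
 * against the index update that publishes it. When the number of
 * outstanding buffers falls to IPA_DEFAULT_SYS_YELLOW_WM or below, an
 * rx_empty event is counted and a slow-path replenish is scheduled.
 */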
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_one;
	u32 curr;

	rx_len_cached = sys->len;
	curr = atomic_read(&sys->repl.head_idx);

	while (rx_len_cached < sys->rx_pool_sz) {
		if (curr == atomic_read(&sys->repl.tail_idx))
			break;

		rx_pkt = sys->repl.cache[curr];
		list_add_tail(&rx_pkt->link, &sys->head_desc_list);

		memset(&gsi_xfer_elem_one, 0,
			sizeof(gsi_xfer_elem_one));
		gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
		gsi_xfer_elem_one.len = sys->rx_buff_sz;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_one.xfer_user_data = rx_pkt;

		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
			&gsi_xfer_elem_one, false);
		if (ret != GSI_STATUS_SUCCESS) {
			IPAERR("failed to provide buffer: %d\n",
				ret);
			break;
		}

		/*
		 * As ringing the doorbell is a costly operation, notify
		 * GSI of new buffers only once the threshold is exceeded
		 */
		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
			sys->len_pending_xfer = 0;
			gsi_start_xfer(sys->ep->gsi_chan_hdl);
		}

		rx_len_cached = ++sys->len;
		curr = (curr + 1) % sys->repl.capacity;
		/* ensure write is done before setting head index */
		mb();
		atomic_set(&sys->repl.head_idx, curr);
	}

	queue_work(sys->repl_wq, &sys->repl_work);

	if (rx_len_cached - sys->len_pending_xfer
		<= IPA_DEFAULT_SYS_YELLOW_WM) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
		else
			WARN_ON(1);
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
	}
}

static void ipa3_replenish_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa3_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	sys->repl_hdlr(sys);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}

/**
 * ipa3_cleanup_rx() - release RX queue resources
 *
 */
static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	struct ipa3_rx_pkt_wrapper *r;
	u32 head;
	u32 tail;

	list_for_each_entry_safe(rx_pkt, r,
				 &sys->head_desc_list, link) {
		list_del(&rx_pkt->link);
		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
		sys->free_skb(rx_pkt->data.skb);
		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
	}

	list_for_each_entry_safe(rx_pkt, r,
				 &sys->rcycl_list, link) {
		list_del(&rx_pkt->link);
		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
		sys->free_skb(rx_pkt->data.skb);
		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
	}

	if (sys->repl.cache) {
		head = atomic_read(&sys->repl.head_idx);
		tail = atomic_read(&sys->repl.tail_idx);
		while (head != tail) {
			rx_pkt = sys->repl.cache[head];
			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
				sys->rx_buff_sz, DMA_FROM_DEVICE);
			sys->free_skb(rx_pkt->data.skb);
			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
			head = (head + 1) % sys->repl.capacity;
		}
		kfree(sys->repl.cache);
	}
}

static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
{
	struct sk_buff *skb2 = NULL;

	skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
	if (likely(skb2)) {
		/* Set the data pointer */
		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
		memcpy(skb2->data, skb->data, len);
		skb2->len = len;
		skb_set_tail_pointer(skb2, len);
	}

	return skb2;
}

static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
		struct ipa3_sys_context *sys)
{
	int rc = 0;
	struct ipahal_pkt_status status;
	u32 pkt_status_sz;
	struct sk_buff *skb2;
	int pad_len_byte;
	int len;
	unsigned char *buf;
	int src_pipe;
	unsigned int used = *(unsigned int *)skb->cb;
	unsigned int used_align = ALIGN(used, 32);
	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
	struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
	unsigned long ptr;

	IPA_DUMP_BUFF(skb->data, 0, skb->len);

	if (skb->len == 0) {
		IPAERR("ZLT\n");
		return rc;
	}

	if (sys->len_partial) {
		IPADBG_LOW("len_partial %d\n", sys->len_partial);
		buf = skb_push(skb, sys->len_partial);
		memcpy(buf, sys->prev_skb->data, sys->len_partial);
		sys->len_partial = 0;
		sys->free_skb(sys->prev_skb);
		sys->prev_skb = NULL;
		goto begin;
	}

	/* this pipe has TX comp (status only) + mux-ed LAN RX data
	 * (status+data)
	 */
	if (sys->len_rem) {
		IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
			sys->len_pad);
		if (sys->len_rem <= skb->len) {
			if (sys->prev_skb) {
				skb2 = skb_copy_expand(sys->prev_skb, 0,
					sys->len_rem, GFP_KERNEL);
				if (likely(skb2)) {
					memcpy(skb_put(skb2, sys->len_rem),
						skb->data, sys->len_rem);
					skb_trim(skb2,
						skb2->len - sys->len_pad);
					skb2->truesize = skb2->len +
						sizeof(struct sk_buff);
					if (sys->drop_packet)
						dev_kfree_skb_any(skb2);
					else
						sys->ep->client_notify(
							sys->ep->priv,
							IPA_RECEIVE,
							(unsigned long)(skb2));
				} else {
					IPAERR("copy expand failed\n");
				}
				dev_kfree_skb_any(sys->prev_skb);
			}
			skb_pull(skb, sys->len_rem);
			sys->prev_skb = NULL;
			sys->len_rem = 0;
			sys->len_pad = 0;
		} else {
			if (sys->prev_skb) {
				skb2 = skb_copy_expand(sys->prev_skb, 0,
					skb->len, GFP_KERNEL);
				if (likely(skb2)) {
					memcpy(skb_put(skb2, skb->len),
						skb->data, skb->len);
				} else {
					IPAERR("copy expand failed\n");
				}
				dev_kfree_skb_any(sys->prev_skb);
				sys->prev_skb = skb2;
			}
			sys->len_rem -= skb->len;
			return rc;
		}
	}

begin:
	pkt_status_sz = ipahal_pkt_status_get_size();
	while (skb->len) {
		sys->drop_packet = false;
		IPADBG_LOW("LEN_REM %d\n", skb->len);

		if (skb->len < pkt_status_sz) {
			WARN_ON(sys->prev_skb != NULL);
			IPADBG_LOW("status straddles buffer\n");
			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
			sys->len_partial = skb->len;
			return rc;
		}

		ipahal_pkt_status_parse(skb->data, &status);
		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
			status.status_opcode, status.endp_src_idx,
			status.endp_dest_idx, status.pkt_len);
		if (sys->status_stat) {
			sys->status_stat->status[sys->status_stat->curr] =
				status;
			sys->status_stat->curr++;
			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
				sys->status_stat->curr = 0;
		}

		if ((status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
			IPAERR("unsupported opcode(%d)\n",
				status.status_opcode);
			skb_pull(skb, pkt_status_sz);
			continue;
		}
		IPA_STATS_EXCP_CNT(status.exception,
			ipa3_ctx->stats.rx_excp_pkts);
		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
			IPAERR("status fields invalid\n");
			IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
				status.status_opcode, status.endp_src_idx,
				status.endp_dest_idx, status.pkt_len);
			WARN_ON(1);
			BUG();
		}
		if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
			IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
			struct ipa3_tag_completion *comp;

			IPADBG_LOW("TAG packet arrived\n");
			if (status.tag_info == IPA_COOKIE) {
				skb_pull(skb, pkt_status_sz);
				if (skb->len < sizeof(comp)) {
					IPAERR("TAG arrived without packet\n");
					return rc;
				}
				/* the tag payload carries a pointer to the
				 * completion object
				 */
				memcpy(&comp, skb->data, sizeof(comp));
				skb_pull(skb, sizeof(comp) +
					IPA_SIZE_DL_CSUM_META_TRAILER);
				complete(&comp->comp);
				if (atomic_dec_return(&comp->cnt) == 0)
					kfree(comp);
				continue;
			} else {
				ptr = tag_to_pointer_wa(status.tag_info);
				tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
				IPADBG_LOW("tx_pkt recv = %p\n", tx_pkt);
			}
		}
		if (status.pkt_len == 0) {
			IPADBG_LOW("Skip aggr close status\n");
			skb_pull(skb, pkt_status_sz);
			IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
			continue;
		}

		if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
			/* RX data */
			src_pipe = status.endp_src_idx;

			/*
			 * A packet which is received back to the AP after
			 * there was no route match.
			 */
			if (status.exception ==
				IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
				ipahal_is_rule_miss_id(status.rt_rule_id))
				sys->drop_packet = true;

			if (skb->len == pkt_status_sz &&
				status.exception ==
				IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
				WARN_ON(sys->prev_skb != NULL);
				IPADBG_LOW("Ins header in next buffer\n");
				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
				sys->len_partial = skb->len;
				return rc;
			}

			pad_len_byte = ((status.pkt_len + 3) & ~3) -
				status.pkt_len;

			len = status.pkt_len + pad_len_byte +
				IPA_SIZE_DL_CSUM_META_TRAILER;
			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
				status.pkt_len, len);

			if (status.exception ==
				IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
				IPADBG_LOW(
					"Dropping packet on DeAggr Exception\n");
				sys->drop_packet = true;
			}

			skb2 = ipa3_skb_copy_for_client(skb,
				min(status.pkt_len + pkt_status_sz, skb->len));
			if (likely(skb2)) {
				if (skb->len < len + pkt_status_sz) {
					IPADBG_LOW("SPL skb len %d len %d\n",
						skb->len, len);
					sys->prev_skb = skb2;
					sys->len_rem = len - skb->len +
						pkt_status_sz;
					sys->len_pad = pad_len_byte;
					skb_pull(skb, skb->len);
				} else {
					skb_trim(skb2, status.pkt_len +
						pkt_status_sz);
					IPADBG_LOW("rx avail for %d\n",
						status.endp_dest_idx);
					if (sys->drop_packet) {
						dev_kfree_skb_any(skb2);
					} else if (status.pkt_len >
						IPA_GENERIC_AGGR_BYTE_LIMIT *
						1024) {
						IPAERR("packet size invalid\n");
						IPAERR("STATUS opcode=%d\n",
							status.status_opcode);
						IPAERR("src=%d dst=%d len=%d\n",
							status.endp_src_idx,
							status.endp_dest_idx,
							status.pkt_len);
						BUG();
					} else {
						skb2->truesize = skb2->len +
							sizeof(struct sk_buff) +
							(ALIGN(len +
							pkt_status_sz, 32) *
							unused / used_align);
						sys->ep->client_notify(
							sys->ep->priv,
							IPA_RECEIVE,
							(unsigned long)(skb2));
					}
					skb_pull(skb, len + pkt_status_sz);
				}
			} else {
				IPAERR("fail to alloc skb\n");
				if (skb->len < len) {
					sys->prev_skb = NULL;
					sys->len_rem = len - skb->len +
						pkt_status_sz;
					sys->len_pad = pad_len_byte;
					skb_pull(skb, skb->len);
				} else {
					skb_pull(skb, len + pkt_status_sz);
				}
			}
			/* TX comp */
			ipa3_wq_write_done_status(src_pipe, tx_pkt);
			IPADBG_LOW("tx comp imp for %d\n", src_pipe);
		} else {
			/* TX comp */
			ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
			IPADBG_LOW("tx comp exp for %d\n",
				status.endp_src_idx);
			skb_pull(skb, pkt_status_sz);
			IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
		}
	}

	return rc;
}

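/*
 * ipa3_join_prev_skb() - Append @len bytes from @skb to a copy of
 * @prev_skb and free @prev_skb. Returns the joined skb, or NULL if the
 * copy-and-expand allocation failed.
 */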
static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
		struct sk_buff *skb, unsigned int len)
{
	struct sk_buff *skb2;

	skb2 = skb_copy_expand(prev_skb, 0,
			len, GFP_KERNEL);
	if (likely(skb2)) {
		memcpy(skb_put(skb2, len),
			skb->data, len);
	} else {
		IPAERR("copy expand failed\n");
		skb2 = NULL;
	}
	dev_kfree_skb_any(prev_skb);

	return skb2;
}

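/*
 * ipa3_wan_rx_handle_splt_pyld() - Handle a WAN payload that was split
 * across aggregation buffers: complete the frame saved in sys->prev_skb
 * with the first sys->len_rem bytes of @skb and deliver it, or, if @skb
 * still does not hold the whole remainder, keep accumulating into
 * sys->prev_skb.
 */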
static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
		struct ipa3_sys_context *sys)
{
	struct sk_buff *skb2;

	IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
	if (sys->len_rem <= skb->len) {
		if (sys->prev_skb) {
			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
					sys->len_rem);
			if (likely(skb2)) {
				IPADBG_LOW(
					"removing Status element from skb and sending to WAN client\n");
				skb_pull(skb2, ipahal_pkt_status_get_size());
				skb2->truesize = skb2->len +
					sizeof(struct sk_buff);
				sys->ep->client_notify(sys->ep->priv,
					IPA_RECEIVE,
					(unsigned long)(skb2));
			}
		}
		skb_pull(skb, sys->len_rem);
		sys->prev_skb = NULL;
		sys->len_rem = 0;
	} else {
		if (sys->prev_skb) {
			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
					skb->len);
			sys->prev_skb = skb2;
		}
		sys->len_rem -= skb->len;
		skb_pull(skb, skb->len);
	}
}

static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
		struct ipa3_sys_context *sys)
{
	int rc = 0;
	struct ipahal_pkt_status status;
	unsigned char *skb_data;
	u32 pkt_status_sz;
	struct sk_buff *skb2;
	u16 pkt_len_with_pad;
	u32 qmap_hdr;
	int checksum_trailer_exists;
	int frame_len;
	int ep_idx;
	unsigned int used = *(unsigned int *)skb->cb;
	unsigned int used_align = ALIGN(used, 32);
	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;

	IPA_DUMP_BUFF(skb->data, 0, skb->len);
	if (skb->len == 0) {
		IPAERR("ZLT\n");
		goto bail;
	}

	if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
		sys->ep->client_notify(sys->ep->priv,
			IPA_RECEIVE, (unsigned long)(skb));
		return rc;
	}
	if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
		IPAERR("recycle should be enabled only with GRO aggr\n");
		ipa_assert();
	}

	/*
	 * payload splits across 2 buff or more,
	 * take the start of the payload from prev_skb
	 */
	if (sys->len_rem)
		ipa3_wan_rx_handle_splt_pyld(skb, sys);

	pkt_status_sz = ipahal_pkt_status_get_size();
	while (skb->len) {
		IPADBG_LOW("LEN_REM %d\n", skb->len);
		if (skb->len < pkt_status_sz) {
			IPAERR("status straddles buffer\n");
			WARN_ON(1);
			goto bail;
		}
		ipahal_pkt_status_parse(skb->data, &status);
		skb_data = skb->data;
		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
			status.status_opcode, status.endp_src_idx,
			status.endp_dest_idx, status.pkt_len);

		if (sys->status_stat) {
			sys->status_stat->status[sys->status_stat->curr] =
				status;
			sys->status_stat->curr++;
			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
				sys->status_stat->curr = 0;
		}

		if ((status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
			IPAERR("unsupported opcode(%d)\n",
				status.status_opcode);
			skb_pull(skb, pkt_status_sz);
			continue;
		}

		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
			status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
			IPAERR("status fields invalid\n");
			WARN_ON(1);
			goto bail;
		}
		if (status.pkt_len == 0) {
			IPADBG_LOW("Skip aggr close status\n");
			skb_pull(skb, pkt_status_sz);
			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
			continue;
		}
		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
		if (status.endp_dest_idx != ep_idx) {
			IPAERR("expected endp_dest_idx %d received %d\n",
				ep_idx, status.endp_dest_idx);
			WARN_ON(1);
			goto bail;
		}
		/* RX data */
		if (skb->len == pkt_status_sz) {
			IPAERR("Ins header in next buffer\n");
			WARN_ON(1);
			goto bail;
		}
		qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
		/*
		 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
		 * header
		 */

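		/*
		 * QMAP (rmnet MAP) header layout, big-endian on the wire -
		 * assuming the standard 4-byte MAP header:
		 *   byte 0:    C/D flag (1 bit), reserved (1 bit),
		 *              pad length (6 bits)
		 *   byte 1:    mux id
		 *   bytes 2-3: payload length including padding
		 */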
		/* QMAP is BE: convert the pkt_len field from BE to LE */
		pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
		IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
		/* get the CHECKSUM_PROCESS bit */
		checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
			IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
		IPADBG_LOW("checksum_trailer_exists %d\n",
			checksum_trailer_exists);

		frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
			pkt_len_with_pad;
		if (checksum_trailer_exists)
			frame_len += IPA_DL_CHECKSUM_LENGTH;
		IPADBG_LOW("frame_len %d\n", frame_len);

		skb2 = skb_clone(skb, GFP_KERNEL);
		if (likely(skb2)) {
			/*
			 * the len of actual data is smaller than expected
			 * payload split across 2 buff
			 */
			if (skb->len < frame_len) {
				IPADBG_LOW("SPL skb len %d len %d\n",
					skb->len, frame_len);
				sys->prev_skb = skb2;
				sys->len_rem = frame_len - skb->len;
				skb_pull(skb, skb->len);
			} else {
				skb_trim(skb2, frame_len);
				IPADBG_LOW("rx avail for %d\n",
					status.endp_dest_idx);
				IPADBG_LOW(
					"removing Status element from skb and sending to WAN client\n");
				skb_pull(skb2, pkt_status_sz);
				skb2->truesize = skb2->len +
					sizeof(struct sk_buff) +
					(ALIGN(frame_len, 32) *
					unused / used_align);
				sys->ep->client_notify(sys->ep->priv,
					IPA_RECEIVE, (unsigned long)(skb2));
				skb_pull(skb, frame_len);
			}
		} else {
			IPAERR("fail to clone\n");
			if (skb->len < frame_len) {
				sys->prev_skb = NULL;
				sys->len_rem = frame_len - skb->len;
				skb_pull(skb, skb->len);
			} else {
				skb_pull(skb, frame_len);
			}
		}
	}
bail:
	sys->free_skb(skb);
	return rc;
}

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
{
	return __dev_alloc_skb(len, flags);
}

static void ipa3_free_skb_rx(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

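/**
 * ipa3_lan_rx_cb() - LAN RX completion callback
 * @priv: private data supplied at pipe setup
 * @evt: expected to be IPA_RECEIVE
 * @data: the received sk_buff
 *
 * Parses the packet status preceding the payload, drops the packet if the
 * source pipe is invalid or has no client notify callback, strips the
 * status (plus the LAN RX header when there was no exception), copies the
 * upper 16 bits of the status metadata into skb->cb and forwards the skb
 * to the client.
 */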
void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
{
	struct sk_buff *rx_skb = (struct sk_buff *)data;
	struct ipahal_pkt_status status;
	struct ipa3_ep_context *ep;
	unsigned int src_pipe;
	u32 metadata;

	ipahal_pkt_status_parse(rx_skb->data, &status);
	src_pipe = status.endp_src_idx;
	metadata = status.metadata;
	ep = &ipa3_ctx->ep[src_pipe];
	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
		!ep->valid ||
		!ep->client_notify)) {
		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
			src_pipe, ep->valid, ep->client_notify);
		dev_kfree_skb_any(rx_skb);
		return;
	}
	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
		skb_pull(rx_skb, ipahal_pkt_status_get_size() +
			IPA_LAN_RX_HEADER_LENGTH);
	else
		skb_pull(rx_skb, ipahal_pkt_status_get_size());

	/* Metadata Info
	 * ------------------------------------------
	 * |    3    |    2    |      1      |  0   |
	 * | fw_desc | vdev_id | qmap mux id | Resv |
	 * ------------------------------------------
	 */
	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
		metadata, *(u32 *)rx_skb->cb);

	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
}

static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
	rx_pkt->data.dma_addr = 0;
	ipa3_skb_recycle(rx_pkt->data.skb);
	INIT_LIST_HEAD(&rx_pkt->link);
	spin_lock_bh(&rx_pkt->sys->spinlock);
	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
	spin_unlock_bh(&rx_pkt->sys->spinlock);
}

void ipa3_recycle_wan_skb(struct sk_buff *skb)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ep_idx = ipa3_get_ep_mapping(
			IPA_CLIENT_APPS_WAN_CONS);
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	if (unlikely(ep_idx == -1)) {
		IPAERR("dest EP does not exist\n");
		ipa_assert();
	}

	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
		flag);
	if (!rx_pkt)
		ipa_assert();

	INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
	rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;

	rx_pkt->data.skb = skb;
	ipa3_recycle_rx_wrapper(rx_pkt);
}

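/*
 * ipa3_wq_rx_common() - Pop the head descriptor of @sys, unmap its buffer,
 * set the skb length from the actual transfer size and pass it to the
 * pipe's payload handler, then recycle the wrapper and replenish the pool.
 * A @size of 0 leaves rx_pkt->len unchanged.
 */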
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
	struct sk_buff *rx_skb;

	if (unlikely(list_empty(&sys->head_desc_list))) {
		WARN_ON(1);
		return;
	}
	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
					   struct ipa3_rx_pkt_wrapper,
					   link);
	list_del(&rx_pkt_expected->link);
	sys->len--;
	if (size)
		rx_pkt_expected->len = size;
	rx_skb = rx_pkt_expected->data.skb;
	dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
		sys->rx_buff_sz, DMA_FROM_DEVICE);
	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
	rx_skb->len = rx_pkt_expected->len;
	*(unsigned int *)rx_skb->cb = rx_skb->len;
	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
	sys->pyld_hdlr(rx_skb, sys);
	sys->free_rx_wrapper(rx_pkt_expected);
	sys->repl_hdlr(sys);
}

static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
	struct sk_buff *rx_skb;

	if (unlikely(list_empty(&sys->head_desc_list))) {
		WARN_ON(1);
		return;
	}
	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
					   struct ipa3_rx_pkt_wrapper,
					   link);
	list_del(&rx_pkt_expected->link);
	sys->len--;

	if (size)
		rx_pkt_expected->len = size;

	rx_skb = rx_pkt_expected->data.skb;
	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
	rx_skb->len = rx_pkt_expected->len;
	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
	sys->ep->wstats.tx_pkts_rcvd++;
	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
		ipa3_free_skb(&rx_pkt_expected->data);
		sys->ep->wstats.tx_pkts_dropped++;
	} else {
		sys->ep->wstats.tx_pkts_sent++;
		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
			(unsigned long)(&rx_pkt_expected->data));
	}
	ipa3_replenish_wlan_rx_cache(sys);
}

static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
	struct ipa_mem_buffer *mem_info)
{
	IPADBG_LOW("ENTER.\n");
	if (unlikely(list_empty(&sys->head_desc_list))) {
		IPAERR("descriptor list is empty!\n");
		WARN_ON(1);
		return;
	}
	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
		(unsigned long)(mem_info));
	IPADBG_LOW("EXIT\n");
}

static void ipa3_wq_rx_avail(struct work_struct *work)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	struct ipa3_sys_context *sys;

	rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
	if (unlikely(rx_pkt == NULL))
		WARN_ON(1);
	sys = rx_pkt->sys;
	ipa3_wq_rx_common(sys, 0);
}

static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
	struct ipa3_sys_context *sys)
{
	if (sys->ep->client_notify) {
		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
			(unsigned long)(rx_skb));
	} else {
		dev_kfree_skb_any(rx_skb);
		WARN_ON(1);
	}

	return 0;
}

static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}

static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
		struct ipa3_sys_context *sys)
{
	if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
		in->client == IPA_CLIENT_APPS_WAN_PROD) {
		sys->policy = IPA_POLICY_INTR_MODE;
		sys->use_comm_evt_ring = false;
		return 0;
	}

	if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
		sys->policy = IPA_POLICY_NOINTR_MODE;
		return 0;
	}

	if (IPA_CLIENT_IS_PROD(in->client)) {
		if (sys->ep->skip_ep_cfg) {
			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			sys->use_comm_evt_ring = true;
			atomic_set(&sys->curr_polling_state, 0);
		} else {
			sys->policy = IPA_POLICY_INTR_MODE;
			sys->use_comm_evt_ring = true;
			INIT_WORK(&sys->work, ipa3_send_nop_desc);
		}
	} else {
		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
		    in->client == IPA_CLIENT_APPS_WAN_CONS) {
			sys->ep->status.status_en = true;
			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
				IPA_GENERIC_RX_BUFF_BASE_SZ);
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
			in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
			in->ipa_ep_cfg.aggr.aggr_time_limit =
				IPA_GENERIC_AGGR_TIME_LIMIT;
			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
				sys->repl_hdlr =
					ipa3_replenish_rx_cache_recycle;
				sys->free_rx_wrapper =
					ipa3_recycle_rx_wrapper;
				sys->rx_pool_sz =
					ipa3_ctx->lan_rx_ring_size;
				in->ipa_ep_cfg.aggr.aggr_byte_limit =
					IPA_GENERIC_AGGR_BYTE_LIMIT;
				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
					IPA_GENERIC_AGGR_PKT_LIMIT;
			} else if (in->client == IPA_CLIENT_APPS_WAN_CONS) {
				sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
				sys->free_rx_wrapper = ipa3_free_rx_wrapper;
				sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
				if (nr_cpu_ids > 1)
					sys->repl_hdlr =
						ipa3_fast_replenish_rx_cache;
				else
					sys->repl_hdlr =
						ipa3_replenish_rx_cache;
				if (in->napi_enabled && in->recycle_enabled)
					sys->repl_hdlr =
						ipa3_replenish_rx_cache_recycle;
				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
					= true;
				if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
					IPAERR("get close-by %u\n",
						ipa_adjust_ra_buff_base_sz(
						in->ipa_ep_cfg.aggr.
						aggr_byte_limit));
					IPAERR("set rx_buff_sz %lu\n",
						(unsigned long int)
						IPA_GENERIC_RX_BUFF_SZ(
						ipa_adjust_ra_buff_base_sz(
						in->ipa_ep_cfg.aggr.
						aggr_byte_limit)));
					/* disable ipa_status */
					sys->ep->status.status_en = false;
					sys->rx_buff_sz =
						IPA_GENERIC_RX_BUFF_SZ(
						ipa_adjust_ra_buff_base_sz(
						in->ipa_ep_cfg.aggr.
						aggr_byte_limit));
					in->ipa_ep_cfg.aggr.aggr_byte_limit =
						sys->rx_buff_sz <
						in->ipa_ep_cfg.aggr.
						aggr_byte_limit ?
						IPA_ADJUST_AGGR_BYTE_LIMIT(
						sys->rx_buff_sz) :
						IPA_ADJUST_AGGR_BYTE_LIMIT(
						in->ipa_ep_cfg.aggr.
						aggr_byte_limit);
					IPAERR("set aggr_limit %lu\n",
						(unsigned long int)
						in->ipa_ep_cfg.aggr.
						aggr_byte_limit);
				} else {
					in->ipa_ep_cfg.aggr.aggr_byte_limit =
						IPA_GENERIC_AGGR_BYTE_LIMIT;
					in->ipa_ep_cfg.aggr.aggr_pkt_limit =
						IPA_GENERIC_AGGR_PKT_LIMIT;
				}
			}
		} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
			IPADBG("assigning policy to client:%d\n",
				in->client);

			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
			sys->rx_pool_sz = in->desc_fifo_sz /
				IPA_FIFO_ELEMENT_SIZE - 1;
			if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
				sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
			sys->pyld_hdlr = NULL;
			sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
			in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
		} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
			IPADBG("assigning policy to client:%d\n",
				in->client);

			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
			sys->rx_pool_sz = in->desc_fifo_sz /
				IPA_FIFO_ELEMENT_SIZE - 1;
			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
				sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
			sys->repl_hdlr = ipa3_replenish_rx_cache;
		} else if (in->client == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
			IPADBG("assigning policy to client:%d\n",
				in->client);

			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
		} else if (in->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
			IPADBG("assigning policy to client:%d\n",
				in->client);

			sys->policy = IPA_POLICY_NOINTR_MODE;
		} else {
			IPAERR("Need to install a RX pipe hdlr\n");
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ipa3_tx_client_rx_notify_release() - Callback function
 * which will call the user supplied callback function to
 * release the skb, or release it on its own if no callback
 * function was supplied
 *
 * @user1: [in] - Data Descriptor
 * @user2: [in] - endpoint idx
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa3_tx_dp_mul
 */
static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
{
	struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
	int ep_idx = user2;

	IPADBG_LOW("Received data desc anchor:%p\n", dd);

	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;

	/* wlan host driver waits till tx complete before unload */
	IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
		ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
	IPADBG_LOW("calling client notify callback with priv:%p\n",
		ipa3_ctx->ep[ep_idx].priv);

	if (ipa3_ctx->ep[ep_idx].client_notify) {
		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
			IPA_WRITE_DONE, (unsigned long)user1);
		ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
	}
}

/**
 * ipa3_tx_client_rx_pkt_status() - Callback function which increases
 * the available FIFO descriptor count and the status-received counter
 * for the endpoint
 *
 * @user1: [in] - Data Descriptor
 * @user2: [in] - endpoint idx
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa3_tx_dp_mul
 */
static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
{
	int ep_idx = user2;

	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
}

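/*
 * Illustrative caller sketch for ipa3_tx_dp_mul() below; this is not taken
 * from a real client and the helpers are hypothetical. A WLAN driver would
 * anchor a list of ipa_tx_data_desc entries and submit them in one call:
 *
 *	struct ipa_tx_data_desc anchor, *d;
 *
 *	INIT_LIST_HEAD(&anchor.link);
 *	for_each_pending_frame(frame) {		// hypothetical iterator
 *		d = wlan_alloc_data_desc();	// hypothetical helper
 *		d->pyld_buffer = frame->data;
 *		d->pyld_len = frame->len;
 *		list_add_tail(&d->link, &anchor.link);
 *	}
 *	if (ipa3_tx_dp_mul(IPA_CLIENT_WLAN1_PROD, &anchor))
 *		wlan_handle_tx_error();		// hypothetical helper
 */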
/**
 * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
 * @src: [in] - Client that is sending data
 * @data_desc: [in] data descriptors from wlan
 *
 * This is used to transfer data descriptors received from the
 * WLAN1_PROD pipe to IPA HW
 *
 * The function will send data descriptors from WLAN1_PROD (one
 * at a time) and set the EOT flag on the last descriptor. Once the send
 * is done from the transport point-of-view, the IPA driver will get
 * notified by the supplied callback - ipa_gsi_irq_tx_notify_cb()
 *
 * ipa_gsi_irq_tx_notify_cb will call to the user supplied callback
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_tx_dp_mul(enum ipa_client_type src,
		struct ipa_tx_data_desc *data_desc)
{
	/* The second byte in wlan header holds qmap id */
#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
	struct ipa_tx_data_desc *entry;
	struct ipa3_sys_context *sys;
	struct ipa3_desc desc[2];
	u32 num_desc, cnt;
	int ep_idx;

	IPADBG_LOW("Received data desc anchor:%p\n", data_desc);

	spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);

	ep_idx = ipa3_get_ep_mapping(src);
	if (unlikely(ep_idx == -1)) {
		IPAERR("dest EP does not exist.\n");
		goto fail_send;
	}
	IPADBG_LOW("ep idx:%d\n", ep_idx);
	sys = ipa3_ctx->ep[ep_idx].sys;

	if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
		IPAERR("dest EP not valid.\n");
		goto fail_send;
	}
	sys->ep->wstats.rx_hd_rcvd++;

	/* Calculate the number of descriptors */
	num_desc = 0;
	list_for_each_entry(entry, &data_desc->link, link) {
		num_desc++;
	}
	IPADBG_LOW("Number of Data Descriptors:%d\n", num_desc);

	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
		IPAERR("Insufficient data descriptors available\n");
		goto fail_send;
	}

	/* Assign callback only for last data descriptor */
	cnt = 0;
	list_for_each_entry(entry, &data_desc->link, link) {
		memset(desc, 0, 2 * sizeof(struct ipa3_desc));

		IPADBG_LOW("Parsing data desc :%d\n", cnt);
		cnt++;
		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
			(u8)sys->ep->cfg.meta.qmap_id;

		/* the tag field will be populated in ipa3_send() function */
		desc[0].opcode =
			ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
		desc[0].type = IPA_IMM_CMD_DESC;
		desc[0].callback = ipa3_tag_destroy_imm;
		desc[1].pyld = entry->pyld_buffer;
		desc[1].len = entry->pyld_len;
		desc[1].type = IPA_DATA_DESC_SKB;
		desc[1].user1 = data_desc;
		desc[1].user2 = ep_idx;
		IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
			entry->priv, desc[1].pyld, desc[1].len);

		/* In case of last descriptor populate callback */
		if (cnt == num_desc) {
			IPADBG_LOW("data desc:%p\n", data_desc);
			desc[1].callback = ipa3_tx_client_rx_notify_release;
		} else {
			desc[1].callback = ipa3_tx_client_rx_pkt_status;
		}

		IPADBG_LOW("calling ipa3_send()\n");
		if (ipa3_send(sys, 2, desc, true)) {
			IPAERR("fail to send skb\n");
			sys->ep->wstats.rx_pkt_leak += (cnt - 1);
			sys->ep->wstats.rx_dp_fail++;
			goto fail_send;
		}

		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
			atomic_dec(&sys->ep->avail_fifo_desc);

		sys->ep->wstats.rx_pkts_rcvd++;
		IPADBG_LOW("ep=%d fifo desc=%d\n",
			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
	}

	sys->ep->wstats.rx_hd_processed++;
	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
	return 0;

fail_send:
	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
	return -EFAULT;
}

void ipa3_free_skb(struct ipa_rx_data *data)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;

	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);

	ipa3_ctx->wc_memb.total_tx_pkts_freed++;
	rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);

	ipa3_skb_recycle(rx_pkt->data.skb);
	(void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);

	list_add_tail(&rx_pkt->link,
		&ipa3_ctx->wc_memb.wlan_comm_desc_list);
	ipa3_ctx->wc_memb.wlan_comm_free_cnt++;

	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}

/* Functions added to support kernel tests */

int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
	unsigned long *ipa_transport_hdl,
	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
{
	struct ipa3_ep_context *ep;
	int ipa_ep_idx;
	int result = -EINVAL;

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}
	if (sys_in->client >= IPA_CLIENT_MAX) {
		IPAERR("bad parm client:%d\n", sys_in->client);
		goto fail_gen;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client :%d\n", sys_in->client);
		goto fail_gen;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);

	if (ep->valid == 1) {
		if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
			IPAERR("EP %d already allocated\n", ipa_ep_idx);
			goto fail_and_disable_clocks;
		} else {
			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
					&sys_in->ipa_ep_cfg.hdr)) {
				IPAERR("fail to configure hdr prop of EP %d\n",
					ipa_ep_idx);
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
					&sys_in->ipa_ep_cfg.cfg)) {
				IPAERR("fail to configure cfg prop of EP %d\n",
					ipa_ep_idx);
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
				sys_in->client, ipa_ep_idx, ep->sys);
			ep->client_notify = sys_in->notify;
			ep->priv = sys_in->priv;
			*clnt_hdl = ipa_ep_idx;
			if (!ep->keep_ipa_awake)
				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

			return 0;
		}
	}

	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = true;
	if (en_status) {
		ep->status.status_en = true;
		ep->status.status_ep = ipa_ep_idx;
	}

	result = ipa3_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d clnt=%d.\n",
			result, ipa_ep_idx);
		goto fail_gen2;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep configuration successful\n");
	} else {
		IPADBG("skipping ep configuration\n");
	}

	*clnt_hdl = ipa_ep_idx;

	*ipa_pipe_num = ipa_ep_idx;
	*ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl;

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
		ipa_ep_idx, ep->sys);

	return 0;

fail_gen2:
fail_and_disable_clocks:
	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
	return result;
}

int ipa3_sys_teardown(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);
	ep->valid = 0;

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}

int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
	unsigned long gsi_ev_hdl)
{
	struct ipa3_ep_context *ep;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	ep->gsi_chan_hdl = gsi_ch_hdl;
	ep->gsi_evt_ring_hdl = gsi_ev_hdl;

	return 0;
}

static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
}

static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
}

3253static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
3254{
3255 struct ipa3_tx_pkt_wrapper *tx_pkt;
3256
3257 IPADBG_LOW("event %d notified\n", notify->evt_id);
3258
3259 switch (notify->evt_id) {
3260 case GSI_CHAN_EVT_EOT:
3261 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
3262 tx_pkt = notify->xfer_user_data;
3263 queue_work(tx_pkt->sys->wq, &tx_pkt->work);
3264 break;
3265 default:
3266 IPAERR("received unexpected event id %d\n", notify->evt_id);
3267 }
3268}
3269
3270static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
3271{
3272 struct ipa3_sys_context *sys;
3273 struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
3274
3275 if (!notify) {
3276 IPAERR("gsi notify is NULL.\n");
3277 return;
3278 }
3279 IPADBG_LOW("event %d notified\n", notify->evt_id);
3280
3281 sys = (struct ipa3_sys_context *)notify->chan_user_data;
3282 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
3283 struct ipa3_rx_pkt_wrapper, link);
3284 rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data;
3285
3286 if (rx_pkt_expected != rx_pkt_rcvd) {
3287 IPAERR("Pkt was not filled in head of rx buffer.\n");
3288 WARN_ON(1);
3289 return;
3290 }
3291 sys->ep->bytes_xfered_valid = true;
3292 sys->ep->bytes_xfered = notify->bytes_xfered;
3293 sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr;
3294
3295 switch (notify->evt_id) {
3296 case GSI_CHAN_EVT_EOT:
3297 case GSI_CHAN_EVT_EOB:
3298 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
3299 if (!atomic_read(&sys->curr_polling_state)) {
3300 /* put the gsi channel into polling mode */
3301 gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
3302 GSI_CHAN_MODE_POLL);
3303 ipa3_inc_acquire_wakelock();
3304 atomic_set(&sys->curr_polling_state, 1);
3305 queue_work(sys->wq, &sys->work);
3306 }
3307 break;
3308 default:
3309 IPAERR("received unexpected event id %d\n", notify->evt_id);
3310 }
3311}
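
/*
 * Illustrative sketch (compiled out): the EOT/EOB handling above is an
 * interrupt-to-polling (NAPI-style) handoff. Condensed to its
 * essentials, using the same fields as the callback;
 * ipa_example_enter_polling() itself is hypothetical.
 */
#if 0
static void ipa_example_enter_polling(struct ipa3_sys_context *sys)
{
	/* only the first completion performs the switch */
	if (atomic_read(&sys->curr_polling_state))
		return;

	/* mask further completion interrupts from this channel */
	gsi_config_channel_mode(sys->ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
	/* hold a wakelock while descriptors are pending */
	ipa3_inc_acquire_wakelock();
	/* publish the mode change, then kick the polling worker */
	atomic_set(&sys->curr_polling_state, 1);
	queue_work(sys->wq, &sys->work);
}
#endif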
3312
3313static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
3314{
3315 struct ipa3_sys_context *sys;
3316 struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
3317
3318 if (!notify) {
3319 IPAERR("gsi notify is NULL.\n");
3320 return;
3321 }
3322 IPADBG_LOW("event %d notified\n", notify->evt_id);
3323
3324 sys = (struct ipa3_sys_context *)notify->chan_user_data;
3325 if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
3326 IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
3327 return;
3328 }
3329 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
3330 struct ipa3_dma_xfer_wrapper, link);
3331 rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify
3332 ->xfer_user_data;
3333 if (rx_pkt_expected != rx_pkt_rcvd) {
3334		IPAERR("Received pkt does not match the head of the rx list.\n");
3335 WARN_ON(1);
3336 return;
3337 }
3338
3339 sys->ep->bytes_xfered_valid = true;
3340 sys->ep->bytes_xfered = notify->bytes_xfered;
3341 sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest;
3342
3343 switch (notify->evt_id) {
3344 case GSI_CHAN_EVT_EOT:
3345 if (!atomic_read(&sys->curr_polling_state)) {
3346 /* put the gsi channel into polling mode */
3347 gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
3348 GSI_CHAN_MODE_POLL);
3349 ipa3_inc_acquire_wakelock();
3350 atomic_set(&sys->curr_polling_state, 1);
3351 queue_work(sys->wq, &sys->work);
3352 }
3353 break;
3354 default:
3355 IPAERR("received unexpected event id %d\n", notify->evt_id);
3356 }
3357}
3358
Skylar Changd407e592017-03-30 11:25:30 -07003359int ipa3_alloc_common_event_ring(void)
3360{
3361 struct gsi_evt_ring_props gsi_evt_ring_props;
3362 dma_addr_t evt_dma_addr;
3363 int result;
3364
3365 memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
3366 gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
3367 gsi_evt_ring_props.intr = GSI_INTR_IRQ;
3368 gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
3369
3370 gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
3371
3372 gsi_evt_ring_props.ring_base_vaddr =
3373 dma_alloc_coherent(ipa3_ctx->pdev,
3374 gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
3375 if (!gsi_evt_ring_props.ring_base_vaddr) {
3376 IPAERR("fail to dma alloc %u bytes\n",
3377 gsi_evt_ring_props.ring_len);
3378 return -ENOMEM;
3379 }
3380 gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
3381 gsi_evt_ring_props.int_modt = 0;
3382	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
3383 gsi_evt_ring_props.rp_update_addr = 0;
3384 gsi_evt_ring_props.exclusive = false;
3385 gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
3386 gsi_evt_ring_props.user_data = NULL;
3387
3388 result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
3389 ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
3390	if (result) {
3391		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
		dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
			gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
3392		return result;
3393 }
3394 ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
3395
3396 return 0;
3397}
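
/*
 * Worked example of the common event ring budget: every endpoint that
 * sets use_comm_evt_ring reserves 2 * desc_fifo_sz bytes of this ring
 * (see ipa_gsi_setup_channel() below). Assuming, hypothetically, that
 * IPA_COMMON_EVENT_RING_SIZE were 31744 bytes and each endpoint used a
 * 2048 byte descriptor FIFO, the ring could host 31744 / (2 * 2048) = 7
 * such endpoints before the gsi_evt_comm_ring_rem check starts
 * rejecting setups. The numbers are placeholders; only the accounting
 * scheme is real.
 */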
3398
Amir Levy9659e592016-10-27 18:08:27 +03003399static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
3400 struct ipa3_ep_context *ep)
3401{
3402 struct gsi_evt_ring_props gsi_evt_ring_props;
3403 struct gsi_chan_props gsi_channel_props;
3404 union __packed gsi_channel_scratch ch_scratch;
Amir Levy3be373c2017-03-05 16:31:30 +02003405 const struct ipa_gsi_ep_config *gsi_ep_info;
Amir Levy9659e592016-10-27 18:08:27 +03003406 dma_addr_t dma_addr;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003407 dma_addr_t evt_dma_addr;
Amir Levy9659e592016-10-27 18:08:27 +03003408 int result;
3409
3410 if (!ep) {
3411 IPAERR("EP context is empty\n");
3412 return -EINVAL;
3413 }
3414
Amir Levyb7d205e2016-12-19 11:31:08 +02003415 evt_dma_addr = 0;
Amir Levy9659e592016-10-27 18:08:27 +03003416 ep->gsi_evt_ring_hdl = ~0;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003417 memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
Skylar Changd407e592017-03-30 11:25:30 -07003418 if (ep->sys->use_comm_evt_ring) {
3419 if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) {
3420 IPAERR("not enough space in common event ring\n");
3421 IPAERR("available: %d needed: %d\n",
3422 ipa3_ctx->gsi_evt_comm_ring_rem,
3423 2 * in->desc_fifo_sz);
3424 WARN_ON(1);
3425 return -EFAULT;
3426 }
3427 ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz);
3428 ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
3429 } else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
Amir Levy9659e592016-10-27 18:08:27 +03003430 IPA_CLIENT_IS_CONS(ep->client)) {
Amir Levy9659e592016-10-27 18:08:27 +03003431 gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
3432 gsi_evt_ring_props.intr = GSI_INTR_IRQ;
3433 gsi_evt_ring_props.re_size =
3434 GSI_EVT_RING_RE_SIZE_16B;
3435
Sunil Paidimarri226cf032016-10-14 13:33:08 -07003436 /*
3437 * GSI ring length is calculated based on the desc_fifo_sz
3438 * which was meant to define the BAM desc fifo. GSI descriptors
3439 * are 16B as opposed to 8B for BAM.
3440 */
Amir Levyb7d205e2016-12-19 11:31:08 +02003441 gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;
Sunil Paidimarri226cf032016-10-14 13:33:08 -07003442
Amir Levy9659e592016-10-27 18:08:27 +03003443 gsi_evt_ring_props.ring_base_vaddr =
Amir Levyb7d205e2016-12-19 11:31:08 +02003444 dma_alloc_coherent(ipa3_ctx->pdev,
3445 gsi_evt_ring_props.ring_len,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003446 &evt_dma_addr, GFP_KERNEL);
3447 if (!gsi_evt_ring_props.ring_base_vaddr) {
3448 IPAERR("fail to dma alloc %u bytes\n",
Amir Levyb7d205e2016-12-19 11:31:08 +02003449 gsi_evt_ring_props.ring_len);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003450 return -ENOMEM;
3451 }
3452 gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
Amir Levy9659e592016-10-27 18:08:27 +03003453
3454 /* copy mem info */
3455 ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
3456 ep->gsi_mem_info.evt_ring_base_addr =
3457 gsi_evt_ring_props.ring_base_addr;
3458 ep->gsi_mem_info.evt_ring_base_vaddr =
3459 gsi_evt_ring_props.ring_base_vaddr;
3460
3461 gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
Skylar Changd407e592017-03-30 11:25:30 -07003462 gsi_evt_ring_props.int_modc = 1;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003463
3464 IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
3465 ep->client,
3466 gsi_evt_ring_props.int_modt,
3467 gsi_evt_ring_props.int_modc);
Amir Levy9659e592016-10-27 18:08:27 +03003468 gsi_evt_ring_props.rp_update_addr = 0;
3469 gsi_evt_ring_props.exclusive = true;
3470 gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
3471 gsi_evt_ring_props.user_data = NULL;
3472
3473 result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
3474 ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
3475 if (result != GSI_STATUS_SUCCESS)
3476 goto fail_alloc_evt_ring;
3477 }
3478
3479 memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
3480 gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
3481 if (IPA_CLIENT_IS_PROD(ep->client)) {
3482 gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
3483 } else {
3484 gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
3485 gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
3486 }
3487
Amir Levy3be373c2017-03-05 16:31:30 +02003488 gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
Amir Levy9659e592016-10-27 18:08:27 +03003489 if (!gsi_ep_info) {
Amir Levy3be373c2017-03-05 16:31:30 +02003490 IPAERR("Failed getting GSI EP info for client=%d\n",
3491 ep->client);
Amir Levy9659e592016-10-27 18:08:27 +03003492 result = -EINVAL;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003493 goto fail_get_gsi_ep_info;
Amir Levy9659e592016-10-27 18:08:27 +03003494 } else
3495 gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
3496
3497 gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
3498 gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
3499
3500 /*
3501 * GSI ring length is calculated based on the desc_fifo_sz which was
3502 * meant to define the BAM desc fifo. GSI descriptors are 16B as opposed
3503 * to 8B for BAM. For PROD pipes there is also an additional descriptor
Ghanim Fodic6b67492017-03-15 14:19:56 +02003504 * for TAG STATUS immediate command. APPS_WAN_PROD pipe is an exception
3505 * as this pipe does not use TAG STATUS for completion. Instead it uses
3506 * event ring based completions.
Amir Levy9659e592016-10-27 18:08:27 +03003507 */
Ghanim Fodic6b67492017-03-15 14:19:56 +02003508 if (ep->client == IPA_CLIENT_APPS_WAN_PROD)
3509 gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
3510 else if (IPA_CLIENT_IS_PROD(ep->client))
Amir Levy9659e592016-10-27 18:08:27 +03003511 gsi_channel_props.ring_len = 4 * in->desc_fifo_sz;
3512 else
3513 gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
3514 gsi_channel_props.ring_base_vaddr =
3515 dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003516 &dma_addr, GFP_KERNEL);
3517 if (!gsi_channel_props.ring_base_vaddr) {
3518 IPAERR("fail to dma alloc %u bytes\n",
3519 gsi_channel_props.ring_len);
		result = -ENOMEM;
3520		goto fail_alloc_channel_ring;
3521 }
Amir Levy9659e592016-10-27 18:08:27 +03003522 gsi_channel_props.ring_base_addr = dma_addr;
3523
3524 /* copy mem info */
3525 ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
3526 ep->gsi_mem_info.chan_ring_base_addr =
3527 gsi_channel_props.ring_base_addr;
3528 ep->gsi_mem_info.chan_ring_base_vaddr =
3529 gsi_channel_props.ring_base_vaddr;
3530
3531 gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
3532 gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
3533 if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
3534 gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
3535 else
3536 gsi_channel_props.low_weight = 1;
3537 gsi_channel_props.chan_user_data = ep->sys;
3538 gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
3539 if (IPA_CLIENT_IS_PROD(ep->client))
3540 gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
3541 else
3542 gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
3543 if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
3544 gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
3545 result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
3546 &ep->gsi_chan_hdl);
3547 if (result != GSI_STATUS_SUCCESS)
3548 goto fail_alloc_channel;
3549
3550 memset(&ch_scratch, 0, sizeof(ch_scratch));
3551 ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
3552 GSI_CHAN_RE_SIZE_16B;
3553 ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
3554 result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
3555 if (result != GSI_STATUS_SUCCESS) {
3556 IPAERR("failed to write scratch %d\n", result);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003557 goto fail_write_channel_scratch;
Amir Levy9659e592016-10-27 18:08:27 +03003558 }
3559
3560 result = gsi_start_channel(ep->gsi_chan_hdl);
3561 if (result != GSI_STATUS_SUCCESS)
3562 goto fail_start_channel;
3563 if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
3564 gsi_config_channel_mode(ep->gsi_chan_hdl,
3565 GSI_CHAN_MODE_POLL);
3566 return 0;
3567
3568fail_start_channel:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003569fail_write_channel_scratch:
Amir Levy9659e592016-10-27 18:08:27 +03003570 if (gsi_dealloc_channel(ep->gsi_chan_hdl)
3571 != GSI_STATUS_SUCCESS) {
3572 IPAERR("Failed to dealloc GSI chan.\n");
3573 BUG();
3574 }
3575fail_alloc_channel:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003576 dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
3577 gsi_channel_props.ring_base_vaddr, dma_addr);
3578fail_alloc_channel_ring:
3579fail_get_gsi_ep_info:
Amir Levy9659e592016-10-27 18:08:27 +03003580 if (ep->gsi_evt_ring_hdl != ~0) {
3581 gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
3582 ep->gsi_evt_ring_hdl = ~0;
3583 }
3584fail_alloc_evt_ring:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003585 if (gsi_evt_ring_props.ring_base_vaddr)
Amir Levyb7d205e2016-12-19 11:31:08 +02003586 dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02003587 gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
Amir Levy9659e592016-10-27 18:08:27 +03003588 IPAERR("Return with err: %d\n", result);
3589 return result;
3590}
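
/*
 * Worked example of the ring sizing above, assuming a hypothetical
 * desc_fifo_sz of 2048 bytes (256 legacy 8 B BAM descriptors):
 *
 *   CONS pipes:                ring_len = 2 * 2048 = 4096 B
 *                              -> 256 16 B TREs, the same descriptor
 *                              count as the BAM FIFO.
 *   PROD pipes (non-WAN):      ring_len = 4 * 2048 = 8192 B
 *                              -> 512 TREs, double the count, leaving
 *                              room for the extra TAG STATUS TRE
 *                              queued with each transfer.
 *
 * Similarly, max_outstanding_tre = ipa_if_tlv * 16 B caps the credit
 * written to channel scratch at one TRE per TLV entry of the endpoint.
 */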
3591
3592static int ipa_populate_tag_field(struct ipa3_desc *desc,
3593 struct ipa3_tx_pkt_wrapper *tx_pkt,
3594 struct ipahal_imm_cmd_pyld **tag_pyld_ret)
3595{
3596 struct ipahal_imm_cmd_pyld *tag_pyld;
3597 struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
3598
3599	/* populate the tag field only if desc->pyld is NULL */
3600 if (desc->pyld == NULL) {
3601 tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
3602 tag_pyld = ipahal_construct_imm_cmd(
3603 IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
3604 if (unlikely(!tag_pyld)) {
3605 IPAERR("Failed to construct ip_packet_tag_status\n");
3606 return -EFAULT;
3607 }
3608 /*
3609 * This is for 32-bit pointer, will need special
3610 * handling if 64-bit pointer is used
3611 */
3612 IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
3613 desc->pyld = tag_pyld->data;
3614 desc->len = tag_pyld->len;
3615 desc->user1 = tag_pyld;
3616
3617 *tag_pyld_ret = tag_pyld;
3618 }
3619 return 0;
3620}
3621
3622static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
3623 struct ipa_mem_buffer *mem_info)
3624{
3625 int ret;
3626 struct gsi_chan_xfer_notify xfer_notify;
3627 struct ipa3_rx_pkt_wrapper *rx_pkt;
3628
3629 if (sys->ep->bytes_xfered_valid) {
3630 mem_info->phys_base = sys->ep->phys_base;
3631 mem_info->size = (u32)sys->ep->bytes_xfered;
3632 sys->ep->bytes_xfered_valid = false;
3633 return GSI_STATUS_SUCCESS;
3634 }
3635
3636 ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
3637 &xfer_notify);
3638 if (ret == GSI_STATUS_POLL_EMPTY)
3639 return ret;
3640 else if (ret != GSI_STATUS_SUCCESS) {
3641 IPAERR("Poll channel err: %d\n", ret);
3642 return ret;
3643 }
3644
3645 rx_pkt = (struct ipa3_rx_pkt_wrapper *)
3646 xfer_notify.xfer_user_data;
3647 mem_info->phys_base = rx_pkt->data.dma_addr;
3648 mem_info->size = xfer_notify.bytes_xfered;
3649
3650 return ret;
3651}
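
/*
 * Illustrative sketch (compiled out): a drain loop built on
 * ipa_poll_gsi_pkt(). GSI_STATUS_POLL_EMPTY ends the loop cleanly;
 * any other non-zero code is a genuine poll error.
 * ipa_example_drain_rx() and its budget parameter are hypothetical,
 * mirroring what ipa3_rx_poll() does below.
 */
#if 0
static int ipa_example_drain_rx(struct ipa3_sys_context *sys, int budget)
{
	struct ipa_mem_buffer mem = { 0 };
	int polled = 0;

	while (polled < budget) {
		if (ipa_poll_gsi_pkt(sys, &mem) != GSI_STATUS_SUCCESS)
			break;	/* ring empty or poll error */
		ipa3_wq_rx_common(sys, mem.size);
		polled++;
	}
	return polled;
}
#endif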
3652
Amir Levy9659e592016-10-27 18:08:27 +03003653/**
3654 * ipa3_rx_poll() - Poll the rx packets from the IPA HW. This
3655 * function is executed in softirq context.
3656 *
3657 * If the input budget is zero, the driver switches back to
3658 * interrupt mode.
3659 *
3660 * Return: number of polled packets, 0 (zero) on error
3661 */
3662int ipa3_rx_poll(u32 clnt_hdl, int weight)
3663{
3664 struct ipa3_ep_context *ep;
3665 int ret;
3666 int cnt = 0;
3667 unsigned int delay = 1;
3668 struct ipa_mem_buffer mem_info = {0};
3669
3670 IPADBG("\n");
3671 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
3672 ipa3_ctx->ep[clnt_hdl].valid == 0) {
3673 IPAERR("bad parm 0x%x\n", clnt_hdl);
3674 return cnt;
3675 }
3676
3677 ep = &ipa3_ctx->ep[clnt_hdl];
3678
3679 while (cnt < weight &&
3680 atomic_read(&ep->sys->curr_polling_state)) {
3681
Amir Levya59ed3f2017-03-05 17:30:55 +02003682 ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
Amir Levy9659e592016-10-27 18:08:27 +03003683 if (ret)
3684 break;
3685
3686 ipa3_wq_rx_common(ep->sys, mem_info.size);
Sunil Paidimarri226cf032016-10-14 13:33:08 -07003687 cnt += IPA_WAN_AGGR_PKT_CNT;
Amir Levy9659e592016-10-27 18:08:27 +03003688	}
3689
Sunil Paidimarri226cf032016-10-14 13:33:08 -07003690 if (cnt == 0 || cnt < weight) {
Amir Levy9659e592016-10-27 18:08:27 +03003691 ep->inactive_cycles++;
3692 ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
3693
3694 if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
3695 ep->switch_to_intr = true;
3696 delay = 0;
Sunil Paidimarri226cf032016-10-14 13:33:08 -07003697 } else if (cnt < weight) {
3698 delay = 0;
Amir Levy9659e592016-10-27 18:08:27 +03003699 }
3700 queue_delayed_work(ep->sys->wq,
3701 &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
3702 } else
3703 ep->inactive_cycles = 0;
3704
3705 return cnt;
3706}
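
/*
 * Note on the budget accounting above: each ipa3_wq_rx_common() call
 * may hand the stack one aggregated frame, so the loop credits
 * IPA_WAN_AGGR_PKT_CNT (5) packets per iteration rather than one.
 * With a typical NAPI weight of 64, at most 13 polls run (12 * 5 = 60,
 * then one more iteration crosses the budget) before the quota is
 * considered spent.
 */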
3707
3708static unsigned long tag_to_pointer_wa(uint64_t tag)
3709{
3710 return 0xFFFF000000000000 | (unsigned long) tag;
3711}
3712
3713static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
3714{
3715 u16 temp;
3716	/* Validate the pointer prefix; this check may cost some throughput */
3717 if (ipa3_is_msm_device()) {
3718 temp = (u16) (~((unsigned long) tx_pkt &
3719 0xFFFF000000000000) >> 48);
3720 if (temp) {
3721			IPAERR("The 16-bit prefix is not all 1s (%p)\n",
3722 tx_pkt);
3723 BUG();
3724 }
3725 }
3726 return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
3727}
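
/*
 * Worked round trip of the tag workaround above, assuming an MSM
 * kernel virtual address whose top 16 bits are all ones (the property
 * pointer_to_tag_wa() asserts):
 *
 *   tx_pkt                    = 0xFFFFFFC012345678
 *   pointer_to_tag_wa(tx_pkt) = 0x0000FFC012345678  (48-bit tag)
 *   tag_to_pointer_wa(tag)    = 0xFFFFFFC012345678  (prefix restored)
 *
 * A pointer without the all-ones prefix would not survive the round
 * trip, which is exactly what the BUG() check guards against.
 */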
3728
3729/**
3730 * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
3731 *
3732 * A hardware limitation requires avoiding the use of GSI physical channel 20.
3733 * This function allocates GSI physical channel 20 and holds it to prevent
3734 * others from using it.
3735 *
3736 * Return codes: 0 on success, negative on failure
3737 */
3738int ipa_gsi_ch20_wa(void)
3739{
3740 struct gsi_chan_props gsi_channel_props;
3741 dma_addr_t dma_addr;
3742 int result;
3743 int i;
3744 unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
3745 unsigned long chan_hdl_to_keep;
3746
3747
3748 memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
3749 gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
3750 gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
3751 gsi_channel_props.evt_ring_hdl = ~0;
3752 gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
3753 gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
3754	gsi_channel_props.ring_base_vaddr =
3755		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
3756		&dma_addr, 0);
	if (!gsi_channel_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_channel_props.ring_len);
		return -ENOMEM;
	}
3757 gsi_channel_props.ring_base_addr = dma_addr;
3758 gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
3759 gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
3760 gsi_channel_props.low_weight = 1;
3761 gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
3762 gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
3763
3764 /* first allocate channels up to channel 20 */
3765 for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
3766 gsi_channel_props.ch_id = i;
3767 result = gsi_alloc_channel(&gsi_channel_props,
3768 ipa3_ctx->gsi_dev_hdl,
3769 &chan_hdl[i]);
3770 if (result != GSI_STATUS_SUCCESS) {
3771 IPAERR("failed to alloc channel %d err %d\n",
3772 i, result);
3773 return result;
3774 }
3775 }
3776
3777 /* allocate channel 20 */
3778 gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
3779 result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
3780 &chan_hdl_to_keep);
3781 if (result != GSI_STATUS_SUCCESS) {
3782 IPAERR("failed to alloc channel %d err %d\n",
3783			gsi_channel_props.ch_id, result);
3784 return result;
3785 }
3786
3787 /* release all other channels */
3788 for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
3789 result = gsi_dealloc_channel(chan_hdl[i]);
3790 if (result != GSI_STATUS_SUCCESS) {
3791 IPAERR("failed to dealloc channel %d err %d\n",
3792 i, result);
3793 return result;
3794 }
3795 }
3796
3797 /* DMA memory shall not be freed as it is used by channel 20 */
3798 return 0;
3799}
3800
3801/**
3802 * ipa_adjust_ra_buff_base_sz()
3803 *
3804 * Return value: the largest power of two smaller than the input
3805 * value after it is padded by IPA_MTU + IPA_GENERIC_RX_BUFF_LIMIT
3806 */
3807static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
3808{
3809 aggr_byte_limit += IPA_MTU;
3810 aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
3811 aggr_byte_limit--;
3812 aggr_byte_limit |= aggr_byte_limit >> 1;
3813 aggr_byte_limit |= aggr_byte_limit >> 2;
3814 aggr_byte_limit |= aggr_byte_limit >> 4;
3815 aggr_byte_limit |= aggr_byte_limit >> 8;
3816 aggr_byte_limit |= aggr_byte_limit >> 16;
3817 aggr_byte_limit++;
3818 return aggr_byte_limit >> 1;
3819}
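
/*
 * Worked example: the bit smearing above is the usual round-up-to-
 * power-of-two trick, halved at the end. Suppose the padded value
 * (aggr_byte_limit + IPA_MTU + IPA_GENERIC_RX_BUFF_LIMIT) is 7645:
 *
 *   7645 - 1 = 7644 (0x1DDC)
 *   OR-smearing fills every bit below the MSB -> 8191 (0x1FFF)
 *   8191 + 1 = 8192, the next power of two >= 7645
 *   8192 >> 1 = 4096, the largest power of two < 7645
 */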