/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
#include "ipa_i.h"
#include "ipa_trace.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
#define POLLING_MAX_SLEEP_RX 1050
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500

#define IPA_MTU 1500
/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
#define IPA_GENERIC_AGGR_TIME_LIMIT 1
#define IPA_GENERIC_AGGR_PKT_LIMIT 0

#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
		(X) + NET_SKB_PAD) +\
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
#define IPA_GENERIC_RX_BUFF_LIMIT (\
		IPA_REAL_GENERIC_RX_BUFF_SZ(\
		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
		IPA_GENERIC_RX_BUFF_BASE_SZ)
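
/*
 * Sizing sketch (illustrative; exact numbers depend on NET_SKB_PAD and
 * the L1 cache line size): IPA_REAL_GENERIC_RX_BUFF_SZ(X) is what an skb
 * allocation really consumes - the payload plus NET_SKB_PAD, cache
 * aligned, plus a cache-aligned struct skb_shared_info.
 * IPA_GENERIC_RX_BUFF_SZ(X) shrinks the HW buffer by that overhead so the
 * whole allocation still fits within the X-byte base size, and
 * IPA_GENERIC_RX_BUFF_LIMIT is that overhead itself for the 8KB base.
 */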

/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
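/*
 * e.g. IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000 = 6,
 * matching IPA_GENERIC_AGGR_BYTE_LIMIT above.
 */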

#define IPA_RX_BUFF_CLIENT_HEADROOM 256

#define IPA_WLAN_RX_POOL_SZ 100
#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
#define IPA_WLAN_RX_BUFF_SZ 2048
#define IPA_WLAN_COMM_RX_POOL_LOW 100
#define IPA_WLAN_COMM_RX_POOL_HIGH 900

#define IPA_ODU_RX_BUFF_SZ 2048
#define IPA_ODU_RX_POOL_SZ 64
#define IPA_SIZE_DL_CSUM_META_TRAILER 8

#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */

#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
/* The below virtual channel cannot be used by any entity */
#define IPA_GSI_CH_20_WA_VIRT_CHAN 29

#define IPA_DEFAULT_SYS_YELLOW_WM 32
#define IPA_REPL_XFER_THRESH 10

#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_wq_handle_rx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
				u32 size);
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
		struct ipa3_sys_context *sys);
static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
static void ipa3_wq_rx_avail(struct work_struct *work);
static void ipa3_alloc_wlan_rx_common_cache(u32 size);
static void ipa3_cleanup_wlan_rx_common_cache(void);
static void ipa3_wq_repl_rx(struct work_struct *work);
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
		struct ipa_mem_buffer *mem_info);
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
		struct ipa3_ep_context *ep);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
		struct ipa3_tx_pkt_wrapper *tx_pkt,
		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
		struct ipa_mem_buffer *mem_info);
static unsigned long tag_to_pointer_wa(uint64_t tag);
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);

static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);

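/*
 * ipa3_wq_write_done_common() - reap one completed TX transfer
 *
 * Walks the tx_pkt->cnt wrappers that make up a single transfer: each is
 * unlinked from the pipe's head_desc_list under the lock, its buffer is
 * DMA-unmapped (unless the caller owned the mapping), its callback is
 * invoked if set, and the wrapper is returned to the kmem cache.
 */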
static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
				struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_tx_pkt_wrapper *next_pkt;
	int i, cnt;

	if (unlikely(tx_pkt == NULL)) {
		IPAERR("tx_pkt is NULL\n");
		return;
	}

	cnt = tx_pkt->cnt;
	IPADBG_LOW("cnt: %d\n", cnt);
	for (i = 0; i < cnt; i++) {
		spin_lock_bh(&sys->spinlock);
		if (unlikely(list_empty(&sys->head_desc_list))) {
			spin_unlock_bh(&sys->spinlock);
			return;
		}
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		sys->len--;
		spin_unlock_bh(&sys->spinlock);
		if (!tx_pkt->no_unmap_dma) {
			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					next_pkt->mem.phys_base,
					next_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		if (tx_pkt->callback)
			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);

		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
}

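/*
 * ipa3_wq_write_done_status() - TX completion reported via a status packet
 *
 * Used when status reporting is enabled on the source pipe: look up the
 * pipe's sys context and reap the transfer with
 * ipa3_wq_write_done_common().
 */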
static void ipa3_wq_write_done_status(int src_pipe,
			struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_sys_context *sys;

	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);

	if (!ipa3_ctx->ep[src_pipe].status.status_en)
		return;

	sys = ipa3_ctx->ep[src_pipe].sys;
	if (!sys)
		return;

	ipa3_wq_write_done_common(sys, tx_pkt);
}

/**
 * ipa3_wq_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work: work_struct used by the work queue
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that
 *   the order of sent packets is the same as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 */
static void ipa3_wq_write_done(struct work_struct *work)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt;
	struct ipa3_sys_context *sys;
	struct ipa3_tx_pkt_wrapper *this_pkt;

	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
	sys = tx_pkt->sys;
	spin_lock_bh(&sys->spinlock);
	this_pkt = list_first_entry(&sys->head_desc_list,
		struct ipa3_tx_pkt_wrapper, link);
	while (tx_pkt != this_pkt) {
		spin_unlock_bh(&sys->spinlock);
		ipa3_wq_write_done_common(sys, this_pkt);
		spin_lock_bh(&sys->spinlock);
		this_pkt = list_first_entry(&sys->head_desc_list,
			struct ipa3_tx_pkt_wrapper, link);
	}
	spin_unlock_bh(&sys->spinlock);
	ipa3_wq_write_done_common(sys, tx_pkt);
}

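/*
 * ipa3_send_nop_desc() - flush TX completions with a NOP element
 *
 * On channels that share a common event ring, data transfers are queued
 * without EOT, so no per-packet completion event is generated. This work
 * item, scheduled via the doorbell hrtimer, queues a single NOP element
 * with EOT; the one resulting event lets ipa3_wq_write_done() reap
 * everything queued before it. On failure the work is requeued to retry.
 */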
static void ipa3_send_nop_desc(struct work_struct *work)
{
	struct ipa3_sys_context *sys = container_of(work,
		struct ipa3_sys_context, work);
	struct gsi_xfer_elem nop_xfer;
	struct ipa3_tx_pkt_wrapper *tx_pkt;

	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
	if (!tx_pkt) {
		IPAERR("failed to alloc tx wrapper\n");
		queue_work(sys->wq, &sys->work);
		return;
	}

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->cnt = 1;
	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
	tx_pkt->no_unmap_dma = true;
	tx_pkt->sys = sys;
	spin_lock_bh(&sys->spinlock);
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	spin_unlock_bh(&sys->spinlock);

	memset(&nop_xfer, 0, sizeof(nop_xfer));
	nop_xfer.type = GSI_XFER_ELEM_NOP;
	nop_xfer.flags = GSI_XFER_FLAG_EOT;
	nop_xfer.xfer_user_data = tx_pkt;
	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
			sys->ep->gsi_chan_hdl);
		queue_work(sys->wq, &sys->work);
		return;
	}
	sys->len_pending_xfer = 0;
}

/**
 * ipa3_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether caller is in atomic context
 *
 * This function is used for GPI connections.
 * - ipa3_tx_pkt_wrapper will be used for each ipa
 *   descriptor (allocated from wrappers cache)
 * - The wrapper struct will be configured for each ipa-desc payload and will
 *   contain information which will be later used by the user callbacks
 * - Each packet (command or data) that will be sent will also be saved in
 *   ipa3_sys_context for later check that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send(struct ipa3_sys_context *sys,
	u32 num_desc,
	struct ipa3_desc *desc,
	bool in_atomic)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
	struct ipa3_tx_pkt_wrapper *next_pkt;
	struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
	int i = 0;
	int j;
	int result;
	u32 mem_flag = GFP_ATOMIC;
	const struct ipa_gsi_ep_config *gsi_ep_cfg;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
	if (unlikely(!gsi_ep_cfg)) {
		IPAERR("failed to get gsi EP config for client=%d\n",
			sys->ep->client);
		return -EFAULT;
	}
	if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
		IPAERR("Too many chained descriptors need=%d max=%d\n",
			num_desc, gsi_ep_cfg->ipa_if_tlv);
		WARN_ON(1);
		return -EPERM;
	}

	gsi_xfer_elem_array =
		kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
			mem_flag);
	if (!gsi_xfer_elem_array) {
		IPAERR("Failed to alloc mem for gsi xfer array.\n");
		return -EFAULT;
	}

	spin_lock_bh(&sys->spinlock);

	for (i = 0; i < num_desc; i++) {
		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
			mem_flag);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			goto failure;
		}

		INIT_LIST_HEAD(&tx_pkt->link);

		if (i == 0) {
			tx_pkt_first = tx_pkt;
			tx_pkt->cnt = num_desc;
			INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
		}

		/* populate tag field */
		if (desc[i].is_tag_status) {
			if (ipa_populate_tag_field(&desc[i], tx_pkt,
				&tag_pyld_ret)) {
				IPAERR("Failed to populate tag field\n");
				goto failure_dma_map;
			}
		}

		tx_pkt->type = desc[i].type;

		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
			tx_pkt->mem.base = desc[i].pyld;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					dma_map_single(ipa3_ctx->pdev,
					tx_pkt->mem.base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		} else {
			tx_pkt->mem.base = desc[i].frag;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					skb_frag_dma_map(ipa3_ctx->pdev,
					desc[i].frag,
					0, tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		}
		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
			IPAERR("failed to do dma map.\n");
			goto failure_dma_map;
		}

		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;

		/*
		 * Special treatment for immediate commands, where
		 * the structure of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			gsi_xfer_elem_array[i].len = desc[i].opcode;
			gsi_xfer_elem_array[i].type =
				GSI_XFER_ELEM_IMME_CMD;
		} else {
			gsi_xfer_elem_array[i].len = desc[i].len;
			gsi_xfer_elem_array[i].type =
				GSI_XFER_ELEM_DATA;
		}

		if (i == (num_desc - 1)) {
			if (!sys->use_comm_evt_ring) {
				gsi_xfer_elem_array[i].flags |=
					GSI_XFER_FLAG_EOT;
				gsi_xfer_elem_array[i].flags |=
					GSI_XFER_FLAG_BEI;
			}
			gsi_xfer_elem_array[i].xfer_user_data =
				tx_pkt_first;
		} else {
			gsi_xfer_elem_array[i].flags |=
				GSI_XFER_FLAG_CHAIN;
		}
	}

	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
			gsi_xfer_elem_array, true);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI xfer failed.\n");
		goto failure;
	}
	kfree(gsi_xfer_elem_array);

	spin_unlock_bh(&sys->spinlock);

	/* set the timer for sending the NOP descriptor */
	if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);

		IPADBG_LOW("scheduling timer for ch %lu\n",
			sys->ep->gsi_chan_hdl);
		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
	}

	return 0;

failure_dma_map:
	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);

failure:
	ipahal_destroy_imm_cmd(tag_pyld_ret);
	tx_pkt = tx_pkt_first;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);

		if (!tx_pkt->no_unmap_dma) {
			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size, DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}

	kfree(gsi_xfer_elem_array);

	spin_unlock_bh(&sys->spinlock);
	return -EFAULT;
}

/**
 * ipa3_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether caller is in atomic context
 *
 * - Allocate tx_packet wrapper
 * - transfer data to the IPA
 * - after the transfer is done the sending user is notified
 *   via the supplied completion callback
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
	bool in_atomic)
{
	return ipa3_send(sys, 1, desc, in_atomic);
}

/**
 * ipa3_transport_irq_cmd_ack - callback function which will be called by
 * the transport driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: not used
 *
 * Complete the immediate command's completion object, this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
{
	struct ipa3_desc *desc = (struct ipa3_desc *)user1;

	if (!desc) {
		IPAERR("desc is NULL\n");
		WARN_ON(1);
		return;
	}
	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}

/**
 * ipa3_transport_irq_cmd_ack_free - callback function which will be
 * called by the transport driver after an immediate command is complete.
 * This function will also free the completion object once it is done.
 * @tag_comp: pointer to the completion object
 * @ignored: parameter not used
 *
 * Complete the immediate command's completion object, this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
{
	struct ipa3_tag_completion *comp = tag_comp;

	if (!comp) {
		IPAERR("comp is NULL\n");
		return;
	}

	complete(&comp->comp);
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);
}

/**
 * ipa3_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 *
 * Function will block till command gets ACK from IPA HW; caller needs
 * to free any resources it allocated after function returns.
 * The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	sys = ipa3_ctx->ep[ep_idx].sys;
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}

/**
 * ipa3_send_cmd_timeout - send immediate commands with limited time
 *	waiting for ACK from IPA HW
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 * @timeout: milliseconds to wait till get ACK from IPA HW
 *
 * Function will block till command gets ACK from IPA HW or timeout.
 * Caller needs to free any resources it allocated after function returns.
 * The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;
	int completed;
	struct ipa3_tag_completion *comp;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
	if (!comp) {
		IPAERR("no mem\n");
		return -ENOMEM;
	}
	init_completion(&comp->comp);

	/* completion needs to be released from both here and in ack callback */
	atomic_set(&comp->cnt, 2);

	sys = ipa3_ctx->ep[ep_idx].sys;
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack_free;
		descr->user1 = comp;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	} else {
		desc = &descr[num_desc - 1];

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack_free;
		desc->user1 = comp;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	}

	completed = wait_for_completion_timeout(
		&comp->comp, msecs_to_jiffies(timeout));
	if (!completed)
		IPADBG("timeout waiting for imm-cmd ACK\n");

	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}

/**
 * ipa3_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, make it non DMAable
 * - Free the packet from the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
		bool in_poll_state)
{
	int ret;
	int cnt = 0;
	struct ipa_mem_buffer mem_info = { 0 };

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
			!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = ipa_poll_gsi_pkt(sys, &mem_info);
		if (ret)
			break;

		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
			ipa3_dma_memcpy_notify(sys, &mem_info);
		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
			ipa3_wlan_wq_rx_common(sys, mem_info.size);
		else
			ipa3_wq_rx_common(sys, mem_info.size);

		++cnt;
	}
	return cnt;
}

/**
 * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
{
	int ret;

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}
	atomic_set(&sys->curr_polling_state, 0);
	ipa3_dec_release_wakelock();
	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
		GSI_CHAN_MODE_CALLBACK);
	if (ret != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to switch to intr mode.\n");
		goto fail;
	}
	return;

fail:
	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
			msecs_to_jiffies(1));
}

/**
 * ipa3_handle_rx() - handle packet reception. This function is executed in the
 * context of a work queue.
 * @work: work struct needed by the work queue
 *
 * ipa3_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa3_handle_rx(struct ipa3_sys_context *sys)
{
	int inactive_cycles = 0;
	int cnt;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	do {
		cnt = ipa3_handle_rx_core(sys, true, true);
		if (cnt == 0)
			inactive_cycles++;
		else
			inactive_cycles = 0;

		trace_idle_sleep_enter3(sys->ep->client);
		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
		trace_idle_sleep_exit3(sys->ep->client);

		/*
		 * if pipe is out of buffers there is no point polling for
		 * completed descs; release the worker so delayed work can
		 * run in a timely manner
		 */
		if (sys->len - sys->len_pending_xfer == 0)
			break;

	} while (inactive_cycles <= POLLING_INACTIVITY_RX);

	trace_poll_to_intr3(sys->ep->client);
	ipa3_rx_switch_to_intr_mode(sys);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}

static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa3_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);

	if (sys->ep->napi_enabled) {
		/* interrupt mode is done in ipa3_rx_poll context */
		ipa_assert();
	} else
		ipa3_handle_rx(sys);
}

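/*
 * ipa3_ring_doorbell_timer_fn() - doorbell hrtimer callback
 *
 * Armed by ipa3_send() on channels that use the common event ring. Runs
 * in hrtimer context, so it only queues sys->work (the NOP-descriptor
 * sender on these TX pipes) to run in workqueue context.
 */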
enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
{
	struct ipa3_sys_context *sys = container_of(param,
		struct ipa3_sys_context, db_timer);

	queue_work(sys->wq, &sys->work);
	return HRTIMER_NORESTART;
}

/**
 * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
 * IPA EP configuration
 * @sys_in:	[in] input needed to setup the pipe and configure EP
 * @clnt_hdl:	[out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - Creates a GPI connection with IPA.
 * - allocate descriptor FIFO
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int ipa_ep_idx;
	int result = -EINVAL;
	char buff[IPA_RESOURCE_NAME_MAX];

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm client:%d fifo_sz:%d\n",
			sys_in->client, sys_in->desc_fifo_sz);
		goto fail_gen;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client.\n");
		goto fail_gen;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		goto fail_gen;
	}

	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	if (!ep->sys) {
		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
		if (!ep->sys) {
			IPAERR("failed to alloc sys ctx for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_and_disable_clocks;
		}

		ep->sys->ep = ep;
		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
			sys_in->client);
		ep->sys->wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);

		if (!ep->sys->wq) {
			IPAERR("failed to create wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq;
		}

		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
			sys_in->client);
		ep->sys->repl_wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
		if (!ep->sys->repl_wq) {
			IPAERR("failed to create rep wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq2;
		}

		INIT_LIST_HEAD(&ep->sys->head_desc_list);
		INIT_LIST_HEAD(&ep->sys->rcycl_list);
		spin_lock_init(&ep->sys->spinlock);
		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
	} else {
		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
	}

	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa3_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to assign policy for client %d\n",
			sys_in->client);
		result = -ENOMEM;
		goto fail_gen2;
	}

	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->napi_enabled = sys_in->napi_enabled;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
	atomic_set(&ep->avail_fifo_desc,
		((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));

	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->sys->status_stat == NULL) {
		ep->sys->status_stat =
			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
		if (!ep->sys->status_stat) {
			IPAERR("no memory\n");
			goto fail_gen2;
		}
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep %d configuration successful\n", ipa_ep_idx);
	} else {
		IPADBG("skipping ep %d configuration\n", ipa_ep_idx);
	}

	result = ipa_gsi_setup_channel(sys_in, ep);
	if (result) {
		IPAERR("Failed to setup GSI channel\n");
		goto fail_gen2;
	}

	*clnt_hdl = ipa_ep_idx;

	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
		ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
				sizeof(void *), GFP_KERNEL);
		if (!ep->sys->repl.cache) {
			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
			ep->sys->repl.capacity = 0;
		} else {
			atomic_set(&ep->sys->repl.head_idx, 0);
			atomic_set(&ep->sys->repl.tail_idx, 0);
			ipa3_wq_repl_rx(&ep->sys->repl_work);
		}
	}

	if (IPA_CLIENT_IS_CONS(sys_in->client))
		ipa3_replenish_rx_cache(ep->sys);

	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
	}

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			sys_in->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_install_dflt_flt_rules(ipa_ep_idx);
	}

	result = ipa3_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d ep=%d.\n", result,
			ipa_ep_idx);
		goto fail_gen2;
	}

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
			ipa_ep_idx, ep->sys);

	return 0;

fail_gen2:
	destroy_workqueue(ep->sys->repl_wq);
fail_wq2:
	destroy_workqueue(ep->sys->wq);
fail_wq:
	kfree(ep->sys);
	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
fail_and_disable_clocks:
	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
	return result;
}

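/*
 * Typical usage (illustrative sketch only; the callback, context and
 * FIFO size below are hypothetical):
 *
 *	struct ipa_sys_connect_params sys_in;
 *	u32 hdl;
 *
 *	memset(&sys_in, 0, sizeof(sys_in));
 *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
 *	sys_in.desc_fifo_sz = 0x800;
 *	sys_in.notify = my_rx_notify;
 *	sys_in.priv = my_ctx;
 *	if (ipa3_setup_sys_pipe(&sys_in, &hdl))
 *		return -EFAULT;
 *	...
 *	ipa3_teardown_sys_pipe(hdl);
 */
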
/**
 * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
 * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_teardown_sys_pipe(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int empty;
	int result;
	int i;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);
	if (ep->napi_enabled) {
		do {
			usleep_range(95, 105);
		} while (atomic_read(&ep->sys->curr_polling_state));
	}

	if (IPA_CLIENT_IS_PROD(ep->client)) {
		do {
			spin_lock_bh(&ep->sys->spinlock);
			empty = list_empty(&ep->sys->head_desc_list);
			spin_unlock_bh(&ep->sys->spinlock);
			if (!empty)
				usleep_range(95, 105);
			else
				break;
		} while (1);
	}

	if (IPA_CLIENT_IS_CONS(ep->client))
		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
	flush_workqueue(ep->sys->wq);
	/* channel stop might fail on timeout if IPA is busy */
	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
		result = ipa3_stop_gsi_channel(clnt_hdl);
		if (result == GSI_STATUS_SUCCESS)
			break;

		if (result != -GSI_STATUS_AGAIN &&
		    result != -GSI_STATUS_TIMED_OUT)
			break;
	}

	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI stop chan err: %d.\n", result);
		ipa_assert();
		return result;
	}
	result = ipa3_reset_gsi_channel(clnt_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to reset chan: %d.\n", result);
		ipa_assert();
		return result;
	}
	dma_free_coherent(ipa3_ctx->pdev,
		ep->gsi_mem_info.chan_ring_len,
		ep->gsi_mem_info.chan_ring_base_vaddr,
		ep->gsi_mem_info.chan_ring_base_addr);
	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc chan: %d.\n", result);
		ipa_assert();
		return result;
	}

	/* free event ring only when it is present */
	if (ep->sys->use_comm_evt_ring) {
		ipa3_ctx->gsi_evt_comm_ring_rem +=
			ep->gsi_mem_info.chan_ring_len;
	} else if (ep->gsi_evt_ring_hdl != ~0) {
		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("Failed to reset evt ring: %d.\n",
				result);
			BUG();
			return result;
		}
		dma_free_coherent(ipa3_ctx->pdev,
			ep->gsi_mem_info.evt_ring_len,
			ep->gsi_mem_info.evt_ring_base_vaddr,
			ep->gsi_mem_info.evt_ring_base_addr);
		result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("Failed to dealloc evt ring: %d.\n",
				result);
			BUG();
			return result;
		}
	}
	if (ep->sys->repl_wq)
		flush_workqueue(ep->sys->repl_wq);
	if (IPA_CLIENT_IS_CONS(ep->client))
		ipa3_cleanup_rx(ep->sys);

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			ep->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_delete_dflt_flt_rules(clnt_hdl);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);

	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));

	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
		ipa3_cleanup_wlan_rx_common_cache();

	ep->valid = 0;
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}

/**
 * ipa3_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: pointer to the skb that was sent
 * @user2: endpoint index
 *
 * This notified callback is for the destination client.
 */
static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
{
	struct sk_buff *skb = (struct sk_buff *)user1;
	int ep_idx = user2;

	IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx);

	IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);

	if (ipa3_ctx->ep[ep_idx].client_notify)
		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
				IPA_WRITE_DONE, (unsigned long)skb);
	else
		dev_kfree_skb_any(skb);
}

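/*
 * ipa3_tx_cmd_comp() - completion callback used for immediate command
 * descriptors; releases the command payload once the transfer is done.
 */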
void ipa3_tx_cmd_comp(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}

/**
 * ipa3_tx_dp() - Data-path tx handler
 * @dst:	[in] which IPA destination to route tx packets to
 * @skb:	[in] the packet to send
 * @meta:	[in] TX packet meta-data
 *
 * Data-path tx handler, this is used for both SW data-path which by-passes
 * most IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic
 * only. If dst is a "valid" CONS type, then SW data-path is used. If dst is
 * the WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used.
 * Anything else is an error. For errors, client needs to free the skb as
 * needed. For success, IPA driver will later invoke client callback if one
 * was supplied. That callback should free the skb. If no callback supplied,
 * IPA driver will free the skb internally.
 *
 * The function will use two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
 * the first descriptor will be used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once this send was done from transport point-of-view the IPA driver will
 * get notified by the supplied callback.
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		struct ipa_tx_meta *meta)
{
	struct ipa3_desc *desc;
	struct ipa3_desc _desc[3];
	int dst_ep_idx;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipa3_sys_context *sys;
	int src_ep_idx;
	int num_frags, f;
	const struct ipa_gsi_ep_config *gsi_ep;
	int data_idx;

	if (unlikely(!ipa3_ctx)) {
		IPAERR("IPA3 driver was not initialized\n");
		return -EINVAL;
	}

	if (skb->len == 0) {
		IPAERR("packet size is 0\n");
		return -EINVAL;
	}

	/*
	 * USB_CONS: PKT_INIT ep_idx = dst pipe
	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
	 *
	 * LAN TX: all PKT_INIT
	 * WAN TX: PKT_INIT (cmd) + HW (data)
	 */
	if (IPA_CLIENT_IS_CONS(dst)) {
		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n",
				IPA_CLIENT_APPS_LAN_PROD);
			goto fail_gen;
		}
		dst_ep_idx = ipa3_get_ep_mapping(dst);
	} else {
		src_ep_idx = ipa3_get_ep_mapping(dst);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n", dst);
			goto fail_gen;
		}
		if (meta && meta->pkt_init_dst_ep_valid)
			dst_ep_idx = meta->pkt_init_dst_ep;
		else
			dst_ep_idx = -1;
	}

	sys = ipa3_ctx->ep[src_ep_idx].sys;

	if (!sys->ep->valid) {
		IPAERR("pipe not valid\n");
		goto fail_gen;
	}

	num_frags = skb_shinfo(skb)->nr_frags;
	/*
	 * make sure TLV FIFO supports the needed frags.
	 * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
	 * 1 descriptor needed for the linear portion of skb.
	 */
	gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
	if (gsi_ep && (num_frags + 3 > gsi_ep->ipa_if_tlv)) {
		if (skb_linearize(skb)) {
			IPAERR("Failed to linear skb with %d frags\n",
				num_frags);
			goto fail_gen;
		}
		num_frags = 0;
	}
	if (num_frags) {
		/* 1 desc for tag to resolve status out-of-order issue;
		 * 1 desc is needed for the linear portion of skb;
		 * 1 desc may be needed for the PACKET_INIT;
		 * 1 desc for each frag
		 */
		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
		if (!desc) {
			IPAERR("failed to alloc desc array\n");
			goto fail_gen;
		}
	} else {
		memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
		desc = &_desc[0];
	}

	if (dst_ep_idx != -1) {
		/* SW data path */
		data_idx = 0;
		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
			/*
			 * For non-interrupt mode channel (where there is no
			 * event ring) TAG STATUS are used for completion
			 * notification. IPA will generate a status packet with
			 * tag info as a result of the TAG STATUS command.
			 */
			desc[data_idx].is_tag_status = true;
			data_idx++;
		}
		desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode;
		desc[data_idx].dma_address_valid = true;
		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
		desc[data_idx].type = IPA_IMM_CMD_DESC;
		desc[data_idx].callback = NULL;
		data_idx++;
		desc[data_idx].pyld = skb->data;
		desc[data_idx].len = skb_headlen(skb);
		desc[data_idx].type = IPA_DATA_DESC_SKB;
		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
		desc[data_idx].user1 = skb;
		desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
				meta->pkt_init_dst_ep_remote) ?
				src_ep_idx :
				dst_ep_idx;
		if (meta && meta->dma_address_valid) {
			desc[data_idx].dma_address_valid = true;
			desc[data_idx].dma_address = meta->dma_address;
		}
		data_idx++;

		for (f = 0; f < num_frags; f++) {
			desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
			desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
			desc[data_idx + f].len =
				skb_frag_size(desc[data_idx + f].frag);
		}
		/* don't free skb till frag mappings are released */
		if (num_frags) {
			desc[data_idx + f - 1].callback =
				desc[data_idx - 1].callback;
			desc[data_idx + f - 1].user1 = desc[data_idx - 1].user1;
			desc[data_idx + f - 1].user2 = desc[data_idx - 1].user2;
			desc[data_idx - 1].callback = NULL;
		}

		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
			IPAERR("fail to send skb %p num_frags %u SWP\n",
				skb, num_frags);
			goto fail_send;
		}
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
	} else {
		/* HW data path */
		data_idx = 0;
		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
			/*
			 * For non-interrupt mode channel (where there is no
			 * event ring) TAG STATUS are used for completion
			 * notification. IPA will generate a status packet with
			 * tag info as a result of the TAG STATUS command.
			 */
			desc[data_idx].is_tag_status = true;
			data_idx++;
		}
		desc[data_idx].pyld = skb->data;
		desc[data_idx].len = skb_headlen(skb);
		desc[data_idx].type = IPA_DATA_DESC_SKB;
		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
		desc[data_idx].user1 = skb;
		desc[data_idx].user2 = src_ep_idx;

		if (meta && meta->dma_address_valid) {
			desc[data_idx].dma_address_valid = true;
			desc[data_idx].dma_address = meta->dma_address;
		}
		if (num_frags == 0) {
			if (ipa3_send(sys, data_idx + 1, desc, true)) {
				IPAERR("fail to send skb %p HWP\n", skb);
				goto fail_mem;
			}
		} else {
			for (f = 0; f < num_frags; f++) {
				desc[data_idx+f+1].frag =
					&skb_shinfo(skb)->frags[f];
				desc[data_idx+f+1].type =
					IPA_DATA_DESC_SKB_PAGED;
				desc[data_idx+f+1].len =
					skb_frag_size(desc[data_idx+f+1].frag);
			}
			/* don't free skb till frag mappings are released */
			desc[data_idx+f].callback = desc[data_idx].callback;
			desc[data_idx+f].user1 = desc[data_idx].user1;
			desc[data_idx+f].user2 = desc[data_idx].user2;
			desc[data_idx].callback = NULL;

			if (ipa3_send(sys, num_frags + data_idx + 1,
				desc, true)) {
				IPAERR("fail to send skb %p num_frags %u HWP\n",
					skb, num_frags);
				goto fail_mem;
			}
		}
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
	}

	if (num_frags) {
		kfree(desc);
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
	}
	return 0;

fail_send:
	ipahal_destroy_imm_cmd(cmd_pyld);
fail_mem:
	if (num_frags)
		kfree(desc);
fail_gen:
	return -EFAULT;
}

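/*
 * Example ipa3_tx_dp() call (illustrative sketch; on success the skb is
 * released through the pipe's client_notify on IPA_WRITE_DONE, so the
 * caller frees it only on error):
 *
 *	if (ipa3_tx_dp(IPA_CLIENT_APPS_LAN_CONS, skb, NULL)) {
 *		dev_kfree_skb_any(skb);
 *		return -EFAULT;
 *	}
 */

/*
 * ipa3_wq_handle_rx() - RX work queued when the channel interrupt fires;
 * NAPI-enabled endpoints are handed to the client to poll, everything
 * else is polled inline via ipa3_handle_rx().
 */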
static void ipa3_wq_handle_rx(struct work_struct *work)
{
	struct ipa3_sys_context *sys;

	sys = container_of(work, struct ipa3_sys_context, work);

	if (sys->ep->napi_enabled) {
		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
		sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
	} else
		ipa3_handle_rx(sys);
}

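/*
 * ipa3_wq_repl_rx() - pre-allocation worker for the fast replenish path
 *
 * Fills sys->repl.cache, a single-producer/single-consumer ring indexed
 * by the head/tail atomics, with DMA-mapped skbs of rx_buff_sz bytes.
 * The worker exits once the ring is full (next == head_idx); if it finds
 * the ring completely drained while allocation keeps failing, it logs
 * the empty condition and retries from 'begin'.
 */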
static void ipa3_wq_repl_rx(struct work_struct *work)
{
	struct ipa3_sys_context *sys;
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	gfp_t flag = GFP_KERNEL;
	u32 next;
	u32 curr;

	sys = container_of(work, struct ipa3_sys_context, repl_work);
	curr = atomic_read(&sys->repl.tail_idx);

begin:
	while (1) {
		next = (curr + 1) % sys->repl.capacity;
		if (next == atomic_read(&sys->repl.head_idx))
			goto fail_kmem_cache_alloc;

		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
					   flag);
		if (!rx_pkt) {
			pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
					__func__, sys);
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);
		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL) {
			pr_err_ratelimited("%s fail alloc skb sys=%p\n",
					__func__, sys);
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
						       sys->rx_buff_sz,
						       DMA_FROM_DEVICE);
		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
				__func__, (void *)rx_pkt->data.dma_addr,
				ptr, sys);
			goto fail_dma_mapping;
		}

		sys->repl.cache[curr] = rx_pkt;
		curr = next;
		/* ensure write is done before setting tail index */
		mb();
		atomic_set(&sys->repl.tail_idx, next);
	}

	return;

fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (atomic_read(&sys->repl.tail_idx) ==
			atomic_read(&sys->repl.head_idx)) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
		else
			WARN_ON(1);
		pr_err_ratelimited("%s sys=%p repl ring empty\n",
				__func__, sys);
		goto begin;
	}
}

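/*
 * ipa3_replenish_wlan_rx_cache() - refill a WLAN consumer pipe
 *
 * Recycles buffers from the shared wlan_comm_desc_list onto the pipe's
 * descriptor list and queues each one to the GSI channel. If the common
 * pool cannot cover rx_pool_sz, it is topped up via
 * ipa3_replenish_rx_cache() while below IPA_WLAN_COMM_RX_POOL_HIGH.
 */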
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
	struct ipa3_rx_pkt_wrapper *tmp;
	int ret;
	struct gsi_xfer_elem gsi_xfer_elem_one;
	u32 rx_len_cached = 0;

	IPADBG_LOW("\n");

	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
	rx_len_cached = sys->len;

	if (rx_len_cached < sys->rx_pool_sz) {
		list_for_each_entry_safe(rx_pkt, tmp,
			&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
			list_del(&rx_pkt->link);

			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;

			INIT_LIST_HEAD(&rx_pkt->link);
			rx_pkt->len = 0;
			rx_pkt->sys = sys;

			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
			memset(&gsi_xfer_elem_one, 0,
				sizeof(gsi_xfer_elem_one));
			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
			gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
			gsi_xfer_elem_one.xfer_user_data = rx_pkt;

			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
				&gsi_xfer_elem_one, true);

			if (ret) {
				IPAERR("failed to provide buffer: %d\n", ret);
				goto fail_provide_rx_buffer;
			}

			rx_len_cached = ++sys->len;

			if (rx_len_cached >= sys->rx_pool_sz) {
				spin_unlock_bh(
					&ipa3_ctx->wc_memb.wlan_spinlock);
				return;
			}
		}
	}
	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);

	if (rx_len_cached < sys->rx_pool_sz &&
			ipa3_ctx->wc_memb.wlan_comm_total_cnt <
			IPA_WLAN_COMM_RX_POOL_HIGH) {
		ipa3_replenish_rx_cache(sys);
		ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
			(sys->rx_pool_sz - rx_len_cached);
	}

	return;

fail_provide_rx_buffer:
	list_del(&rx_pkt->link);
	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}

1556static void ipa3_cleanup_wlan_rx_common_cache(void)
1557{
1558 struct ipa3_rx_pkt_wrapper *rx_pkt;
1559 struct ipa3_rx_pkt_wrapper *tmp;
1560
Mohammed Javide8daa2f2017-07-17 12:03:41 +05301561 spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
1562
Amir Levy9659e592016-10-27 18:08:27 +03001563 list_for_each_entry_safe(rx_pkt, tmp,
1564 &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
1565 list_del(&rx_pkt->link);
1566 dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
1567		IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
1568 dev_kfree_skb_any(rx_pkt->data.skb);
1569 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
1570 ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
1571 ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
1572 }
1573 ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;
1574
1575 if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
1576 IPAERR("wlan comm buff free cnt: %d\n",
1577 ipa3_ctx->wc_memb.wlan_comm_free_cnt);
1578
1579 if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
1580 IPAERR("wlan comm buff total cnt: %d\n",
1581 ipa3_ctx->wc_memb.wlan_comm_total_cnt);
1582
Mohammed Javide8daa2f2017-07-17 12:03:41 +05301583 spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
1584
Amir Levy9659e592016-10-27 18:08:27 +03001585}
1586
1587static void ipa3_alloc_wlan_rx_common_cache(u32 size)
1588{
1589 void *ptr;
1590 struct ipa3_rx_pkt_wrapper *rx_pkt;
1591 int rx_len_cached = 0;
1592 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
1593
1594 rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
1595 while (rx_len_cached < size) {
1596 rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
1597 flag);
1598 if (!rx_pkt) {
1599 IPAERR("failed to alloc rx wrapper\n");
1600 goto fail_kmem_cache_alloc;
1601 }
1602
1603 INIT_LIST_HEAD(&rx_pkt->link);
1604 INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
1605
1606 rx_pkt->data.skb =
1607 ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
1608 flag);
1609 if (rx_pkt->data.skb == NULL) {
1610 IPAERR("failed to alloc skb\n");
1611 goto fail_skb_alloc;
1612 }
1613 ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
1614 rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
1615 IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
Utkarsh Saxenae4166a72017-05-22 13:21:55 +05301616 if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
Amir Levy9659e592016-10-27 18:08:27 +03001617 IPAERR("dma_map_single failure %p for %p\n",
1618 (void *)rx_pkt->data.dma_addr, ptr);
1619 goto fail_dma_mapping;
1620 }
1621
Mohammed Javide8daa2f2017-07-17 12:03:41 +05301622 spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
Amir Levy9659e592016-10-27 18:08:27 +03001623 list_add_tail(&rx_pkt->link,
1624 &ipa3_ctx->wc_memb.wlan_comm_desc_list);
1625 rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
1626
1627 ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
Mohammed Javide8daa2f2017-07-17 12:03:41 +05301628 spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
Amir Levy9659e592016-10-27 18:08:27 +03001629
1630 }
1631
1632 return;
1633
1634fail_dma_mapping:
1635 dev_kfree_skb_any(rx_pkt->data.skb);
1636fail_skb_alloc:
1637 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
1638fail_kmem_cache_alloc:
1639 return;
1640}
1641
1642
1643/**
1644 * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
1645 *
1646 * The function allocates buffers from the rx_pkt_wrapper_cache until the
1647 * system pipe holds sys->rx_pool_sz buffers:
1648 * - Allocate a wrapper from the cache
1649 * - Initialize the packet's link
1650 * - Initialize the packet's work struct
1651 * - Allocate the packet's socket buffer (skb)
1652 * - Fill the packet's skb with data
1653 * - Make the packet DMAable
1654 * - Add the packet to the system pipe linked list
Amir Levy9659e592016-10-27 18:08:27 +03001655 */
1656static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
1657{
1658 void *ptr;
1659 struct ipa3_rx_pkt_wrapper *rx_pkt;
1660 int ret;
1661 int rx_len_cached = 0;
1662 struct gsi_xfer_elem gsi_xfer_elem_one;
1663 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
1664
1665 rx_len_cached = sys->len;
1666
1667 while (rx_len_cached < sys->rx_pool_sz) {
1668 rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
1669 flag);
1670 if (!rx_pkt) {
1671 IPAERR("failed to alloc rx wrapper\n");
1672 goto fail_kmem_cache_alloc;
1673 }
1674
1675 INIT_LIST_HEAD(&rx_pkt->link);
1676 INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
1677 rx_pkt->sys = sys;
1678
1679 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
1680 if (rx_pkt->data.skb == NULL) {
1681 IPAERR("failed to alloc skb\n");
1682 goto fail_skb_alloc;
1683 }
1684 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
1685 rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
1686 sys->rx_buff_sz,
1687 DMA_FROM_DEVICE);
Utkarsh Saxenae4166a72017-05-22 13:21:55 +05301688 if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
Amir Levy9659e592016-10-27 18:08:27 +03001689 IPAERR("dma_map_single failure %p for %p\n",
1690 (void *)rx_pkt->data.dma_addr, ptr);
1691 goto fail_dma_mapping;
1692 }
1693
1694 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
1695 rx_len_cached = ++sys->len;
1696
Amir Levya59ed3f2017-03-05 17:30:55 +02001697 memset(&gsi_xfer_elem_one, 0,
1698 sizeof(gsi_xfer_elem_one));
1699 gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
1700 gsi_xfer_elem_one.len = sys->rx_buff_sz;
1701 gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
1702 gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
1703 gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
1704 gsi_xfer_elem_one.xfer_user_data = rx_pkt;
Amir Levy9659e592016-10-27 18:08:27 +03001705
Amir Levya59ed3f2017-03-05 17:30:55 +02001706 ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
Skylar Changd407e592017-03-30 11:25:30 -07001707 1, &gsi_xfer_elem_one, false);
Amir Levya59ed3f2017-03-05 17:30:55 +02001708 if (ret != GSI_STATUS_SUCCESS) {
1709 IPAERR("failed to provide buffer: %d\n",
1710 ret);
1711 goto fail_provide_rx_buffer;
Amir Levy9659e592016-10-27 18:08:27 +03001712 }
Skylar Changd407e592017-03-30 11:25:30 -07001713
1714		/*
1715		 * Ringing the doorbell is a costly operation, so notify GSI
1716		 * of new buffers only once the threshold is exceeded
1717		 */
1718 if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
1719 sys->len_pending_xfer = 0;
1720 gsi_start_xfer(sys->ep->gsi_chan_hdl);
1721 }
Amir Levy9659e592016-10-27 18:08:27 +03001722 }
1723
1724 return;
1725
1726fail_provide_rx_buffer:
1727 list_del(&rx_pkt->link);
1728 rx_len_cached = --sys->len;
1729 dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
1730 sys->rx_buff_sz, DMA_FROM_DEVICE);
1731fail_dma_mapping:
1732 sys->free_skb(rx_pkt->data.skb);
1733fail_skb_alloc:
1734 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
1735fail_kmem_cache_alloc:
Skylar Changd407e592017-03-30 11:25:30 -07001736 if (rx_len_cached - sys->len_pending_xfer == 0)
Amir Levy9659e592016-10-27 18:08:27 +03001737 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
1738 msecs_to_jiffies(1));
1739}
1740
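/*
 * ipa3_replenish_rx_cache_recycle() - like ipa3_replenish_rx_cache(),
 * but first reuses wrappers and skbs parked on sys->rcycl_list by
 * ipa3_recycle_rx_wrapper(); fresh allocations are made only when the
 * recycle list is empty.
 */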
1741static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
1742{
1743 void *ptr;
1744 struct ipa3_rx_pkt_wrapper *rx_pkt;
1745 int ret;
1746 int rx_len_cached = 0;
1747 struct gsi_xfer_elem gsi_xfer_elem_one;
1748 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
1749
1750 rx_len_cached = sys->len;
1751
1752 while (rx_len_cached < sys->rx_pool_sz) {
1753 if (list_empty(&sys->rcycl_list)) {
1754 rx_pkt = kmem_cache_zalloc(
1755 ipa3_ctx->rx_pkt_wrapper_cache, flag);
1756 if (!rx_pkt) {
1757 IPAERR("failed to alloc rx wrapper\n");
1758 goto fail_kmem_cache_alloc;
1759 }
1760
1761 INIT_LIST_HEAD(&rx_pkt->link);
1762 INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
1763 rx_pkt->sys = sys;
1764
1765 rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
1766 if (rx_pkt->data.skb == NULL) {
1767 IPAERR("failed to alloc skb\n");
1768 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
1769 rx_pkt);
1770 goto fail_kmem_cache_alloc;
1771 }
1772 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
1773 rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
1774 ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
Utkarsh Saxenae4166a72017-05-22 13:21:55 +05301775 if (dma_mapping_error(ipa3_ctx->pdev,
1776 rx_pkt->data.dma_addr)) {
Amir Levy9659e592016-10-27 18:08:27 +03001777 IPAERR("dma_map_single failure %p for %p\n",
1778 (void *)rx_pkt->data.dma_addr, ptr);
1779 goto fail_dma_mapping;
1780 }
1781 } else {
1782 spin_lock_bh(&sys->spinlock);
1783 rx_pkt = list_first_entry(&sys->rcycl_list,
1784 struct ipa3_rx_pkt_wrapper, link);
1785 list_del(&rx_pkt->link);
1786 spin_unlock_bh(&sys->spinlock);
1787 INIT_LIST_HEAD(&rx_pkt->link);
1788 ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
1789 rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
1790 ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
Utkarsh Saxenae4166a72017-05-22 13:21:55 +05301791 if (dma_mapping_error(ipa3_ctx->pdev,
1792 rx_pkt->data.dma_addr)) {
Amir Levy9659e592016-10-27 18:08:27 +03001793 IPAERR("dma_map_single failure %p for %p\n",
1794 (void *)rx_pkt->data.dma_addr, ptr);
1795 goto fail_dma_mapping;
1796 }
1797 }
1798
1799 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
1800 rx_len_cached = ++sys->len;
Amir Levya59ed3f2017-03-05 17:30:55 +02001801 memset(&gsi_xfer_elem_one, 0,
1802 sizeof(gsi_xfer_elem_one));
1803 gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
1804 gsi_xfer_elem_one.len = sys->rx_buff_sz;
1805 gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
1806 gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
1807 gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
1808 gsi_xfer_elem_one.xfer_user_data = rx_pkt;
Amir Levy9659e592016-10-27 18:08:27 +03001809
Amir Levya59ed3f2017-03-05 17:30:55 +02001810 ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
Skylar Changd407e592017-03-30 11:25:30 -07001811 1, &gsi_xfer_elem_one, false);
Amir Levya59ed3f2017-03-05 17:30:55 +02001812 if (ret != GSI_STATUS_SUCCESS) {
1813 IPAERR("failed to provide buffer: %d\n",
1814 ret);
1815 goto fail_provide_rx_buffer;
Amir Levy9659e592016-10-27 18:08:27 +03001816 }
Skylar Changd407e592017-03-30 11:25:30 -07001817
1818		/*
1819		 * Ringing the doorbell is a costly operation, so notify GSI
1820		 * of new buffers only once the threshold is exceeded
1821		 */
1822 if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
1823 sys->len_pending_xfer = 0;
1824 gsi_start_xfer(sys->ep->gsi_chan_hdl);
1825 }
Amir Levy9659e592016-10-27 18:08:27 +03001826 }
1827
1828 return;
1829fail_provide_rx_buffer:
1830 rx_len_cached = --sys->len;
1831 list_del(&rx_pkt->link);
1832 INIT_LIST_HEAD(&rx_pkt->link);
1833 dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
1834 sys->rx_buff_sz, DMA_FROM_DEVICE);
1835fail_dma_mapping:
1836 spin_lock_bh(&sys->spinlock);
1837 list_add_tail(&rx_pkt->link, &sys->rcycl_list);
1838 INIT_LIST_HEAD(&rx_pkt->link);
1839 spin_unlock_bh(&sys->spinlock);
1840fail_kmem_cache_alloc:
Skylar Changd407e592017-03-30 11:25:30 -07001841 if (rx_len_cached - sys->len_pending_xfer == 0)
Amir Levy9659e592016-10-27 18:08:27 +03001842 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
1843 msecs_to_jiffies(1));
1844}
1845
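/*
 * ipa3_fast_replenish_rx_cache() - consumer side of the sys->repl ring.
 * Pops buffers that ipa3_wq_repl_rx() already allocated and DMA-mapped,
 * queues them to the GSI channel, then kicks repl_work to refill the
 * ring. If the number of committed buffers is still at or below
 * IPA_DEFAULT_SYS_YELLOW_WM, an rx-empty stat is bumped and the
 * slow-path replenish work is scheduled as a fallback.
 */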
1846static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
1847{
1848 struct ipa3_rx_pkt_wrapper *rx_pkt;
1849 int ret;
1850 int rx_len_cached = 0;
1851 struct gsi_xfer_elem gsi_xfer_elem_one;
1852 u32 curr;
1853
Michael Adisumarta326dbb82017-06-27 11:29:41 -07001854 spin_lock_bh(&sys->spinlock);
1855
Amir Levy9659e592016-10-27 18:08:27 +03001856 rx_len_cached = sys->len;
1857 curr = atomic_read(&sys->repl.head_idx);
1858
1859 while (rx_len_cached < sys->rx_pool_sz) {
1860 if (curr == atomic_read(&sys->repl.tail_idx))
1861 break;
1862
1863 rx_pkt = sys->repl.cache[curr];
1864 list_add_tail(&rx_pkt->link, &sys->head_desc_list);
1865
Amir Levya59ed3f2017-03-05 17:30:55 +02001866 memset(&gsi_xfer_elem_one, 0,
1867 sizeof(gsi_xfer_elem_one));
1868 gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
1869 gsi_xfer_elem_one.len = sys->rx_buff_sz;
1870 gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
1871 gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
1872 gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
1873 gsi_xfer_elem_one.xfer_user_data = rx_pkt;
Amir Levy9659e592016-10-27 18:08:27 +03001874
Amir Levya59ed3f2017-03-05 17:30:55 +02001875 ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
Skylar Changd407e592017-03-30 11:25:30 -07001876 &gsi_xfer_elem_one, false);
Amir Levya59ed3f2017-03-05 17:30:55 +02001877 if (ret != GSI_STATUS_SUCCESS) {
1878 IPAERR("failed to provide buffer: %d\n",
1879 ret);
1880 break;
Amir Levy9659e592016-10-27 18:08:27 +03001881 }
Skylar Changd407e592017-03-30 11:25:30 -07001882
1883		/*
1884		 * Ringing the doorbell is a costly operation, so notify GSI
1885		 * of new buffers only once the threshold is exceeded
1886		 */
1887 if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
1888 sys->len_pending_xfer = 0;
1889 gsi_start_xfer(sys->ep->gsi_chan_hdl);
1890 }
1891
Amir Levy9659e592016-10-27 18:08:27 +03001892 rx_len_cached = ++sys->len;
1893 curr = (curr + 1) % sys->repl.capacity;
1894 /* ensure write is done before setting head index */
1895 mb();
1896 atomic_set(&sys->repl.head_idx, curr);
1897 }
Michael Adisumarta326dbb82017-06-27 11:29:41 -07001898 spin_unlock_bh(&sys->spinlock);
Amir Levy9659e592016-10-27 18:08:27 +03001899
1900 queue_work(sys->repl_wq, &sys->repl_work);
1901
Skylar Changd407e592017-03-30 11:25:30 -07001902 if (rx_len_cached - sys->len_pending_xfer
1903 <= IPA_DEFAULT_SYS_YELLOW_WM) {
Amir Levy9659e592016-10-27 18:08:27 +03001904 if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
1905 IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
1906 else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
1907 IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
1908 else
1909 WARN_ON(1);
1910 queue_delayed_work(sys->wq, &sys->replenish_rx_work,
1911 msecs_to_jiffies(1));
1912 }
1913}
1914
1915static void ipa3_replenish_rx_work_func(struct work_struct *work)
1916{
1917 struct delayed_work *dwork;
1918 struct ipa3_sys_context *sys;
1919
1920 dwork = container_of(work, struct delayed_work, work);
1921 sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
1922 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1923 sys->repl_hdlr(sys);
1924 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1925}
1926
1927/**
1928 * ipa3_cleanup_rx() - release RX queue resources
1929 *
1930 */
1931static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
1932{
1933 struct ipa3_rx_pkt_wrapper *rx_pkt;
1934 struct ipa3_rx_pkt_wrapper *r;
1935 u32 head;
1936 u32 tail;
1937
1938 list_for_each_entry_safe(rx_pkt, r,
1939 &sys->head_desc_list, link) {
1940 list_del(&rx_pkt->link);
1941 dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
1942 sys->rx_buff_sz, DMA_FROM_DEVICE);
1943 sys->free_skb(rx_pkt->data.skb);
1944 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
1945 }
1946
1947 list_for_each_entry_safe(rx_pkt, r,
1948 &sys->rcycl_list, link) {
1949 list_del(&rx_pkt->link);
1950 dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
1951 sys->rx_buff_sz, DMA_FROM_DEVICE);
1952 sys->free_skb(rx_pkt->data.skb);
1953 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
1954 }
1955
1956 if (sys->repl.cache) {
1957 head = atomic_read(&sys->repl.head_idx);
1958 tail = atomic_read(&sys->repl.tail_idx);
1959 while (head != tail) {
1960 rx_pkt = sys->repl.cache[head];
1961 dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
1962 sys->rx_buff_sz, DMA_FROM_DEVICE);
1963 sys->free_skb(rx_pkt->data.skb);
1964 kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
1965 head = (head + 1) % sys->repl.capacity;
1966 }
1967 kfree(sys->repl.cache);
1968 }
1969}
1970
1971static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
1972{
1973 struct sk_buff *skb2 = NULL;
1974
1975 skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
1976 if (likely(skb2)) {
1977 /* Set the data pointer */
1978 skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
1979 memcpy(skb2->data, skb->data, len);
1980 skb2->len = len;
1981 skb_set_tail_pointer(skb2, len);
1982 }
1983
1984 return skb2;
1985}
1986
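/*
 * ipa3_lan_rx_pyld_hdlr() - LAN payload handler. Walks the status
 * elements in the aggregated buffer, demultiplexing TX completions
 * (status only) from mux-ed LAN RX data (status + payload). A status
 * element that straddles two buffers is stashed via prev_skb/len_partial;
 * a payload that continues into the next buffer is tracked via
 * len_rem/len_pad and rejoined on the next call.
 */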
1987static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
1988 struct ipa3_sys_context *sys)
1989{
1990 int rc = 0;
1991 struct ipahal_pkt_status status;
1992 u32 pkt_status_sz;
1993 struct sk_buff *skb2;
1994 int pad_len_byte;
1995 int len;
1996 unsigned char *buf;
1997 int src_pipe;
1998 unsigned int used = *(unsigned int *)skb->cb;
1999 unsigned int used_align = ALIGN(used, 32);
2000 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2001 struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
2002 unsigned long ptr;
2003
2004 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2005
2006 if (skb->len == 0) {
2007 IPAERR("ZLT\n");
2008 return rc;
2009 }
2010
2011 if (sys->len_partial) {
2012 IPADBG_LOW("len_partial %d\n", sys->len_partial);
2013 buf = skb_push(skb, sys->len_partial);
2014 memcpy(buf, sys->prev_skb->data, sys->len_partial);
2015 sys->len_partial = 0;
2016 sys->free_skb(sys->prev_skb);
2017 sys->prev_skb = NULL;
2018 goto begin;
2019 }
2020
2021 /* this pipe has TX comp (status only) + mux-ed LAN RX data
2022 * (status+data)
2023 */
2024 if (sys->len_rem) {
2025 IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
2026 sys->len_pad);
2027 if (sys->len_rem <= skb->len) {
2028 if (sys->prev_skb) {
2029 skb2 = skb_copy_expand(sys->prev_skb, 0,
2030 sys->len_rem, GFP_KERNEL);
2031 if (likely(skb2)) {
2032 memcpy(skb_put(skb2, sys->len_rem),
2033 skb->data, sys->len_rem);
2034 skb_trim(skb2,
2035 skb2->len - sys->len_pad);
2036 skb2->truesize = skb2->len +
2037 sizeof(struct sk_buff);
2038 if (sys->drop_packet)
2039 dev_kfree_skb_any(skb2);
2040 else
2041 sys->ep->client_notify(
2042 sys->ep->priv,
2043 IPA_RECEIVE,
2044 (unsigned long)(skb2));
2045 } else {
2046 IPAERR("copy expand failed\n");
2047 }
2048 dev_kfree_skb_any(sys->prev_skb);
2049 }
2050 skb_pull(skb, sys->len_rem);
2051 sys->prev_skb = NULL;
2052 sys->len_rem = 0;
2053 sys->len_pad = 0;
2054 } else {
2055 if (sys->prev_skb) {
2056 skb2 = skb_copy_expand(sys->prev_skb, 0,
2057 skb->len, GFP_KERNEL);
2058 if (likely(skb2)) {
2059 memcpy(skb_put(skb2, skb->len),
2060 skb->data, skb->len);
2061 } else {
2062 IPAERR("copy expand failed\n");
2063 }
2064 dev_kfree_skb_any(sys->prev_skb);
2065 sys->prev_skb = skb2;
2066 }
2067 sys->len_rem -= skb->len;
2068 return rc;
2069 }
2070 }
2071
2072begin:
2073 pkt_status_sz = ipahal_pkt_status_get_size();
2074 while (skb->len) {
2075 sys->drop_packet = false;
2076 IPADBG_LOW("LEN_REM %d\n", skb->len);
2077
2078 if (skb->len < pkt_status_sz) {
2079 WARN_ON(sys->prev_skb != NULL);
2080 IPADBG_LOW("status straddles buffer\n");
2081 sys->prev_skb = skb_copy(skb, GFP_KERNEL);
2082 sys->len_partial = skb->len;
2083 return rc;
2084 }
2085
2086 ipahal_pkt_status_parse(skb->data, &status);
2087 IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
2088 status.status_opcode, status.endp_src_idx,
2089 status.endp_dest_idx, status.pkt_len);
2090 if (sys->status_stat) {
2091 sys->status_stat->status[sys->status_stat->curr] =
2092 status;
2093 sys->status_stat->curr++;
2094 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2095 sys->status_stat->curr = 0;
2096 }
2097
2098 if ((status.status_opcode !=
2099 IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
2100 (status.status_opcode !=
2101 IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
2102 (status.status_opcode !=
2103 IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
2104 (status.status_opcode !=
2105 IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
2106 IPAERR("unsupported opcode(%d)\n",
2107 status.status_opcode);
2108 skb_pull(skb, pkt_status_sz);
2109 continue;
2110 }
2111 IPA_STATS_EXCP_CNT(status.exception,
2112 ipa3_ctx->stats.rx_excp_pkts);
2113 if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
2114 status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
2115 IPAERR("status fields invalid\n");
2116 IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
2117 status.status_opcode, status.endp_src_idx,
2118 status.endp_dest_idx, status.pkt_len);
2119 WARN_ON(1);
2120 BUG();
2121 }
2122 if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
2123 IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
2124 struct ipa3_tag_completion *comp;
2125
2126 IPADBG_LOW("TAG packet arrived\n");
2127 if (status.tag_info == IPA_COOKIE) {
2128 skb_pull(skb, pkt_status_sz);
2129 if (skb->len < sizeof(comp)) {
2130 IPAERR("TAG arrived without packet\n");
2131 return rc;
2132 }
2133 memcpy(&comp, skb->data, sizeof(comp));
2134 skb_pull(skb, sizeof(comp) +
2135 IPA_SIZE_DL_CSUM_META_TRAILER);
2136 complete(&comp->comp);
2137 if (atomic_dec_return(&comp->cnt) == 0)
2138 kfree(comp);
2139 continue;
2140 } else {
2141 ptr = tag_to_pointer_wa(status.tag_info);
2142 tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
2143 IPADBG_LOW("tx_pkt recv = %p\n", tx_pkt);
2144 }
2145 }
2146 if (status.pkt_len == 0) {
2147 IPADBG_LOW("Skip aggr close status\n");
2148 skb_pull(skb, pkt_status_sz);
2149 IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
2150 IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
2151 [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
2152 continue;
2153 }
2154
2155 if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
2156 /* RX data */
2157 src_pipe = status.endp_src_idx;
2158
2159 /*
2160 * A packet which is received back to the AP after
2161 * there was no route match.
2162 */
2163 if (status.exception ==
2164 IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
2165 ipahal_is_rule_miss_id(status.rt_rule_id))
2166 sys->drop_packet = true;
2167
2168 if (skb->len == pkt_status_sz &&
2169 status.exception ==
2170 IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
2171 WARN_ON(sys->prev_skb != NULL);
2172 IPADBG_LOW("Ins header in next buffer\n");
2173 sys->prev_skb = skb_copy(skb, GFP_KERNEL);
2174 sys->len_partial = skb->len;
2175 return rc;
2176 }
2177
2178 pad_len_byte = ((status.pkt_len + 3) & ~3) -
2179 status.pkt_len;
2180
2181 len = status.pkt_len + pad_len_byte +
2182 IPA_SIZE_DL_CSUM_META_TRAILER;
2183 IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
2184 status.pkt_len, len);
2185
2186 if (status.exception ==
2187 IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
2188 IPADBG_LOW(
2189 "Dropping packet on DeAggr Exception\n");
2190 sys->drop_packet = true;
2191 }
2192
2193 skb2 = ipa3_skb_copy_for_client(skb,
2194 min(status.pkt_len + pkt_status_sz, skb->len));
2195 if (likely(skb2)) {
2196 if (skb->len < len + pkt_status_sz) {
2197 IPADBG_LOW("SPL skb len %d len %d\n",
2198 skb->len, len);
2199 sys->prev_skb = skb2;
2200 sys->len_rem = len - skb->len +
2201 pkt_status_sz;
2202 sys->len_pad = pad_len_byte;
2203 skb_pull(skb, skb->len);
2204 } else {
2205 skb_trim(skb2, status.pkt_len +
2206 pkt_status_sz);
2207 IPADBG_LOW("rx avail for %d\n",
2208 status.endp_dest_idx);
2209 if (sys->drop_packet) {
2210 dev_kfree_skb_any(skb2);
2211 } else if (status.pkt_len >
2212 IPA_GENERIC_AGGR_BYTE_LIMIT *
2213 1024) {
2214 IPAERR("packet size invalid\n");
2215 IPAERR("STATUS opcode=%d\n",
2216 status.status_opcode);
2217 IPAERR("src=%d dst=%d len=%d\n",
2218 status.endp_src_idx,
2219 status.endp_dest_idx,
2220 status.pkt_len);
2221 BUG();
2222 } else {
2223 skb2->truesize = skb2->len +
2224 sizeof(struct sk_buff) +
2225 (ALIGN(len +
2226 pkt_status_sz, 32) *
2227 unused / used_align);
2228 sys->ep->client_notify(
2229 sys->ep->priv,
2230 IPA_RECEIVE,
2231 (unsigned long)(skb2));
2232 }
2233 skb_pull(skb, len + pkt_status_sz);
2234 }
2235 } else {
2236 IPAERR("fail to alloc skb\n");
2237 if (skb->len < len) {
2238 sys->prev_skb = NULL;
2239 sys->len_rem = len - skb->len +
2240 pkt_status_sz;
2241 sys->len_pad = pad_len_byte;
2242 skb_pull(skb, skb->len);
2243 } else {
2244 skb_pull(skb, len + pkt_status_sz);
2245 }
2246 }
2247 /* TX comp */
2248 ipa3_wq_write_done_status(src_pipe, tx_pkt);
2249 IPADBG_LOW("tx comp imp for %d\n", src_pipe);
2250 } else {
2251 /* TX comp */
2252 ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
2253 IPADBG_LOW("tx comp exp for %d\n",
2254 status.endp_src_idx);
2255 skb_pull(skb, pkt_status_sz);
2256 IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
2257 IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
2258 [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
2259 }
2260	}
2261
2262 return rc;
2263}
2264
2265static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
2266 struct sk_buff *skb, unsigned int len)
2267{
2268 struct sk_buff *skb2;
2269
2270 skb2 = skb_copy_expand(prev_skb, 0,
2271 len, GFP_KERNEL);
2272 if (likely(skb2)) {
2273 memcpy(skb_put(skb2, len),
2274 skb->data, len);
2275 } else {
2276 IPAERR("copy expand failed\n");
2277 skb2 = NULL;
2278 }
2279 dev_kfree_skb_any(prev_skb);
2280
2281 return skb2;
2282}
2283
2284static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
2285 struct ipa3_sys_context *sys)
2286{
2287 struct sk_buff *skb2;
2288
2289 IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
2290 if (sys->len_rem <= skb->len) {
2291 if (sys->prev_skb) {
2292 skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
2293 sys->len_rem);
2294 if (likely(skb2)) {
2295 IPADBG_LOW(
2296 "removing Status element from skb and sending to WAN client");
2297 skb_pull(skb2, ipahal_pkt_status_get_size());
2298 skb2->truesize = skb2->len +
2299 sizeof(struct sk_buff);
2300 sys->ep->client_notify(sys->ep->priv,
2301 IPA_RECEIVE,
2302 (unsigned long)(skb2));
2303 }
2304 }
2305 skb_pull(skb, sys->len_rem);
2306 sys->prev_skb = NULL;
2307 sys->len_rem = 0;
2308 } else {
2309 if (sys->prev_skb) {
2310 skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
2311 skb->len);
2312 sys->prev_skb = skb2;
2313 }
2314 sys->len_rem -= skb->len;
2315 skb_pull(skb, skb->len);
2316 }
2317}
2318
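/*
 * ipa3_wan_rx_pyld_hdlr() - WAN payload handler. Each frame carries a
 * status element followed by a QMAP header; the padded packet length is
 * taken from the QMAP header, and the checksum trailer, when present,
 * is added to the frame length. Frames are cloned out of the aggregated
 * buffer, and a frame split across buffers is rejoined through
 * ipa3_wan_rx_handle_splt_pyld().
 */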
2319static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
2320 struct ipa3_sys_context *sys)
2321{
2322 int rc = 0;
2323 struct ipahal_pkt_status status;
2324 unsigned char *skb_data;
2325 u32 pkt_status_sz;
2326 struct sk_buff *skb2;
2327 u16 pkt_len_with_pad;
2328 u32 qmap_hdr;
2329 int checksum_trailer_exists;
2330 int frame_len;
2331 int ep_idx;
2332 unsigned int used = *(unsigned int *)skb->cb;
2333 unsigned int used_align = ALIGN(used, 32);
2334 unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
2335
2336 IPA_DUMP_BUFF(skb->data, 0, skb->len);
2337 if (skb->len == 0) {
2338 IPAERR("ZLT\n");
2339 goto bail;
2340 }
2341
2342 if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
2343 sys->ep->client_notify(sys->ep->priv,
2344 IPA_RECEIVE, (unsigned long)(skb));
2345 return rc;
2346 }
2347 if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
2348		IPAERR("Recycle should be enabled only with GRO Aggr\n");
2349 ipa_assert();
2350 }
2351
2352 /*
2353 * payload splits across 2 buff or more,
2354 * take the start of the payload from prev_skb
2355 */
2356 if (sys->len_rem)
2357 ipa3_wan_rx_handle_splt_pyld(skb, sys);
2358
2359 pkt_status_sz = ipahal_pkt_status_get_size();
2360 while (skb->len) {
2361 IPADBG_LOW("LEN_REM %d\n", skb->len);
2362 if (skb->len < pkt_status_sz) {
2363 IPAERR("status straddles buffer\n");
2364 WARN_ON(1);
2365 goto bail;
2366 }
2367 ipahal_pkt_status_parse(skb->data, &status);
2368 skb_data = skb->data;
2369 IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
2370 status.status_opcode, status.endp_src_idx,
2371 status.endp_dest_idx, status.pkt_len);
2372
2373 if (sys->status_stat) {
2374 sys->status_stat->status[sys->status_stat->curr] =
2375 status;
2376 sys->status_stat->curr++;
2377 if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
2378 sys->status_stat->curr = 0;
2379 }
2380
2381 if ((status.status_opcode !=
2382 IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
2383 (status.status_opcode !=
2384 IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
2385 (status.status_opcode !=
2386 IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
2387 IPAERR("unsupported opcode(%d)\n",
2388 status.status_opcode);
2389 skb_pull(skb, pkt_status_sz);
2390 continue;
2391 }
2392
2393 IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
2394 if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
2395 status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
2396 status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
2397 IPAERR("status fields invalid\n");
2398 WARN_ON(1);
2399 goto bail;
2400 }
2401 if (status.pkt_len == 0) {
2402 IPADBG_LOW("Skip aggr close status\n");
2403 skb_pull(skb, pkt_status_sz);
2404 IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
2405 IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
2406 continue;
2407 }
2408 ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
2409 if (status.endp_dest_idx != ep_idx) {
2410 IPAERR("expected endp_dest_idx %d received %d\n",
2411 ep_idx, status.endp_dest_idx);
2412 WARN_ON(1);
2413 goto bail;
2414 }
2415 /* RX data */
2416 if (skb->len == pkt_status_sz) {
2417 IPAERR("Ins header in next buffer\n");
2418 WARN_ON(1);
2419 goto bail;
2420 }
2421 qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
2422 /*
2423 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
2424 * header
2425 */
2426
2427		/* QMAP is BE: convert the pkt_len field from BE to LE */
2428 pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
2429 IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
2430 /*get the CHECKSUM_PROCESS bit*/
2431 checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
2432 IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
2433 IPADBG_LOW("checksum_trailer_exists %d\n",
2434 checksum_trailer_exists);
2435
2436 frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
2437 pkt_len_with_pad;
2438 if (checksum_trailer_exists)
2439 frame_len += IPA_DL_CHECKSUM_LENGTH;
2440 IPADBG_LOW("frame_len %d\n", frame_len);
2441
2442 skb2 = skb_clone(skb, GFP_KERNEL);
2443 if (likely(skb2)) {
2444 /*
2445 * the len of actual data is smaller than expected
2446 * payload split across 2 buff
2447 */
2448 if (skb->len < frame_len) {
2449 IPADBG_LOW("SPL skb len %d len %d\n",
2450 skb->len, frame_len);
2451 sys->prev_skb = skb2;
2452 sys->len_rem = frame_len - skb->len;
2453 skb_pull(skb, skb->len);
2454 } else {
2455 skb_trim(skb2, frame_len);
2456 IPADBG_LOW("rx avail for %d\n",
2457 status.endp_dest_idx);
2458 IPADBG_LOW(
2459 "removing Status element from skb and sending to WAN client");
2460 skb_pull(skb2, pkt_status_sz);
2461 skb2->truesize = skb2->len +
2462 sizeof(struct sk_buff) +
2463 (ALIGN(frame_len, 32) *
2464 unused / used_align);
2465 sys->ep->client_notify(sys->ep->priv,
2466 IPA_RECEIVE, (unsigned long)(skb2));
2467 skb_pull(skb, frame_len);
2468 }
2469 } else {
2470 IPAERR("fail to clone\n");
2471 if (skb->len < frame_len) {
2472 sys->prev_skb = NULL;
2473 sys->len_rem = frame_len - skb->len;
2474 skb_pull(skb, skb->len);
2475 } else {
2476 skb_pull(skb, frame_len);
2477 }
2478 }
2479	}
2480bail:
2481 sys->free_skb(skb);
2482 return rc;
2483}
2484
2485static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
2486{
2487 return __dev_alloc_skb(len, flags);
2488}
2489
2490static void ipa3_free_skb_rx(struct sk_buff *skb)
2491{
2492 dev_kfree_skb_any(skb);
2493}
2494
2495void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
2496{
2497 struct sk_buff *rx_skb = (struct sk_buff *)data;
2498 struct ipahal_pkt_status status;
2499 struct ipa3_ep_context *ep;
2500 unsigned int src_pipe;
2501 u32 metadata;
2502
2503 ipahal_pkt_status_parse(rx_skb->data, &status);
2504 src_pipe = status.endp_src_idx;
2505 metadata = status.metadata;
2506 ep = &ipa3_ctx->ep[src_pipe];
2507 if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
2508 !ep->valid ||
2509 !ep->client_notify)) {
2510 IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
2511 src_pipe, ep->valid, ep->client_notify);
2512 dev_kfree_skb_any(rx_skb);
2513 return;
2514 }
2515 if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
2516 skb_pull(rx_skb, ipahal_pkt_status_get_size() +
2517 IPA_LAN_RX_HEADER_LENGTH);
2518 else
2519 skb_pull(rx_skb, ipahal_pkt_status_get_size());
2520
2521 /* Metadata Info
2522 * ------------------------------------------
2523 * | 3 | 2 | 1 | 0 |
2524 * | fw_desc | vdev_id | qmap mux id | Resv |
2525 * ------------------------------------------
2526 */
2527 *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
2528 IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
2529 metadata, *(u32 *)rx_skb->cb);
2530
2531 ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
2532}
2533
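/*
 * ipa3_recycle_rx_wrapper() - reset the skb and park the wrapper on
 * sys->rcycl_list so ipa3_replenish_rx_cache_recycle() can reuse it
 * without a fresh allocation.
 */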
2534static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
2535{
2536 rx_pkt->data.dma_addr = 0;
2537 ipa3_skb_recycle(rx_pkt->data.skb);
2538 INIT_LIST_HEAD(&rx_pkt->link);
2539 spin_lock_bh(&rx_pkt->sys->spinlock);
2540 list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
2541 spin_unlock_bh(&rx_pkt->sys->spinlock);
2542}
2543
2544void ipa3_recycle_wan_skb(struct sk_buff *skb)
2545{
2546 struct ipa3_rx_pkt_wrapper *rx_pkt;
2547 int ep_idx = ipa3_get_ep_mapping(
2548 IPA_CLIENT_APPS_WAN_CONS);
2549 gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
2550
2551 if (unlikely(ep_idx == -1)) {
2552 IPAERR("dest EP does not exist\n");
2553 ipa_assert();
2554 }
2555
2556 rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
2557 flag);
2558 if (!rx_pkt)
2559 ipa_assert();
2560
2561 INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
2562 rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;
2563
2564 rx_pkt->data.skb = skb;
2565 ipa3_recycle_rx_wrapper(rx_pkt);
2566}
2567
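/*
 * ipa3_wq_rx_common() - common rx completion path: detach the expected
 * wrapper from head_desc_list, unmap its buffer, fix up the skb length
 * and truesize, then hand the skb to the payload handler and replenish
 * the pool.
 */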
2568static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
2569{
2570 struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
2571 struct sk_buff *rx_skb;
2572
2573 if (unlikely(list_empty(&sys->head_desc_list))) {
2574 WARN_ON(1);
2575 return;
2576 }
Michael Adisumartaeec048b2017-08-16 11:20:06 -07002577 spin_lock_bh(&sys->spinlock);
Amir Levy9659e592016-10-27 18:08:27 +03002578 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2579 struct ipa3_rx_pkt_wrapper,
2580 link);
2581 list_del(&rx_pkt_expected->link);
2582 sys->len--;
2583 if (size)
2584 rx_pkt_expected->len = size;
Michael Adisumartaeec048b2017-08-16 11:20:06 -07002585 spin_unlock_bh(&sys->spinlock);
Amir Levy9659e592016-10-27 18:08:27 +03002586 rx_skb = rx_pkt_expected->data.skb;
2587 dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
2588 sys->rx_buff_sz, DMA_FROM_DEVICE);
2589 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2590 rx_skb->len = rx_pkt_expected->len;
2591 *(unsigned int *)rx_skb->cb = rx_skb->len;
2592 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2593 sys->pyld_hdlr(rx_skb, sys);
2594 sys->free_rx_wrapper(rx_pkt_expected);
2595 sys->repl_hdlr(sys);
2596}
2597
2598static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
2599{
2600 struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
2601 struct sk_buff *rx_skb;
2602
2603 if (unlikely(list_empty(&sys->head_desc_list))) {
2604 WARN_ON(1);
2605 return;
2606 }
2607 rx_pkt_expected = list_first_entry(&sys->head_desc_list,
2608 struct ipa3_rx_pkt_wrapper,
2609 link);
2610 list_del(&rx_pkt_expected->link);
2611 sys->len--;
2612
2613 if (size)
2614 rx_pkt_expected->len = size;
2615
2616 rx_skb = rx_pkt_expected->data.skb;
2617 skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
2618 rx_skb->len = rx_pkt_expected->len;
2619 rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
2620 sys->ep->wstats.tx_pkts_rcvd++;
2621 if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
2622 ipa3_free_skb(&rx_pkt_expected->data);
2623 sys->ep->wstats.tx_pkts_dropped++;
2624 } else {
2625 sys->ep->wstats.tx_pkts_sent++;
2626 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
2627 (unsigned long)(&rx_pkt_expected->data));
2628 }
2629 ipa3_replenish_wlan_rx_cache(sys);
2630}
2631
2632static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
2633 struct ipa_mem_buffer *mem_info)
2634{
2635 IPADBG_LOW("ENTER.\n");
2636 if (unlikely(list_empty(&sys->head_desc_list))) {
2637 IPAERR("descriptor list is empty!\n");
2638 WARN_ON(1);
2639 return;
2640 }
2641 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
2642 (unsigned long)(mem_info));
2643 IPADBG_LOW("EXIT\n");
2644}
2645
2646static void ipa3_wq_rx_avail(struct work_struct *work)
2647{
2648 struct ipa3_rx_pkt_wrapper *rx_pkt;
2649 struct ipa3_sys_context *sys;
2650
2651 rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
2652	if (WARN_ON(rx_pkt == NULL))
2653		return;
2654 sys = rx_pkt->sys;
2655 ipa3_wq_rx_common(sys, 0);
2656}
2657
Amir Levy9659e592016-10-27 18:08:27 +03002658static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
2659 struct ipa3_sys_context *sys)
2660{
2661 if (sys->ep->client_notify) {
2662 sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
2663 (unsigned long)(rx_skb));
2664 } else {
2665 dev_kfree_skb_any(rx_skb);
2666 WARN_ON(1);
2667 }
2668
2669 return 0;
2670}
2671
2672static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
2673{
2674	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
2675}
2676
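/*
 * ipa3_assign_policy() - derive the sys pipe configuration from the
 * client type: interrupt/polling/no-interrupt policy, work handlers,
 * rx buffer and pool sizes, replenish/payload callbacks and the
 * aggregation parameters passed back through in->ipa_ep_cfg.
 */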
2677static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
2678 struct ipa3_sys_context *sys)
2679{
Skylar Changa8f4b402017-04-06 17:20:03 -07002680 if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
Amir Levy9659e592016-10-27 18:08:27 +03002681 sys->policy = IPA_POLICY_INTR_MODE;
Skylar Changd407e592017-03-30 11:25:30 -07002682 sys->use_comm_evt_ring = false;
Amir Levy9659e592016-10-27 18:08:27 +03002683 return 0;
2684 }
2685
2686 if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
2687 sys->policy = IPA_POLICY_NOINTR_MODE;
Amir Levy9659e592016-10-27 18:08:27 +03002688 return 0;
2689 }
2690
2691 if (IPA_CLIENT_IS_PROD(in->client)) {
2692 if (sys->ep->skip_ep_cfg) {
2693 sys->policy = IPA_POLICY_INTR_POLL_MODE;
Skylar Changd407e592017-03-30 11:25:30 -07002694 sys->use_comm_evt_ring = true;
Amir Levy9659e592016-10-27 18:08:27 +03002695 atomic_set(&sys->curr_polling_state, 0);
2696 } else {
Skylar Changd407e592017-03-30 11:25:30 -07002697 sys->policy = IPA_POLICY_INTR_MODE;
2698 sys->use_comm_evt_ring = true;
2699 INIT_WORK(&sys->work, ipa3_send_nop_desc);
Amir Levy9659e592016-10-27 18:08:27 +03002700 }
2701 } else {
2702 if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
2703 in->client == IPA_CLIENT_APPS_WAN_CONS) {
2704 sys->ep->status.status_en = true;
2705 sys->policy = IPA_POLICY_INTR_POLL_MODE;
Amir Levy9659e592016-10-27 18:08:27 +03002706 INIT_WORK(&sys->work, ipa3_wq_handle_rx);
2707 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
2708 ipa3_switch_to_intr_rx_work_func);
2709 INIT_DELAYED_WORK(&sys->replenish_rx_work,
2710 ipa3_replenish_rx_work_func);
2711 INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
2712 atomic_set(&sys->curr_polling_state, 0);
2713 sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
2714 IPA_GENERIC_RX_BUFF_BASE_SZ);
2715 sys->get_skb = ipa3_get_skb_ipa_rx;
2716 sys->free_skb = ipa3_free_skb_rx;
2717 in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
2718 in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
2719 in->ipa_ep_cfg.aggr.aggr_time_limit =
2720 IPA_GENERIC_AGGR_TIME_LIMIT;
2721 if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
2722 sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
2723 sys->repl_hdlr =
2724 ipa3_replenish_rx_cache_recycle;
2725 sys->free_rx_wrapper =
2726 ipa3_recycle_rx_wrapper;
2727 sys->rx_pool_sz =
2728 ipa3_ctx->lan_rx_ring_size;
2729 in->ipa_ep_cfg.aggr.aggr_byte_limit =
2730 IPA_GENERIC_AGGR_BYTE_LIMIT;
2731 in->ipa_ep_cfg.aggr.aggr_pkt_limit =
2732 IPA_GENERIC_AGGR_PKT_LIMIT;
2733 } else if (in->client ==
2734 IPA_CLIENT_APPS_WAN_CONS) {
2735 sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
2736 sys->free_rx_wrapper = ipa3_free_rx_wrapper;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002737 sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
2738 if (nr_cpu_ids > 1) {
Amir Levy9659e592016-10-27 18:08:27 +03002739 sys->repl_hdlr =
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002740 ipa3_fast_replenish_rx_cache;
2741 } else {
2742 sys->repl_hdlr =
2743 ipa3_replenish_rx_cache;
2744 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002745 if (in->napi_enabled && in->recycle_enabled)
2746 sys->repl_hdlr =
2747 ipa3_replenish_rx_cache_recycle;
Amir Levy9659e592016-10-27 18:08:27 +03002748 in->ipa_ep_cfg.aggr.aggr_sw_eof_active
2749 = true;
2750 if (ipa3_ctx->
2751 ipa_client_apps_wan_cons_agg_gro) {
2752 IPAERR("get close-by %u\n",
2753 ipa_adjust_ra_buff_base_sz(
2754 in->ipa_ep_cfg.aggr.
2755 aggr_byte_limit));
2756 IPAERR("set rx_buff_sz %lu\n",
2757 (unsigned long int)
2758 IPA_GENERIC_RX_BUFF_SZ(
2759 ipa_adjust_ra_buff_base_sz(
2760 in->ipa_ep_cfg.
2761 aggr.aggr_byte_limit)));
2762 /* disable ipa_status */
2763 sys->ep->status.
2764 status_en = false;
2765 sys->rx_buff_sz =
2766 IPA_GENERIC_RX_BUFF_SZ(
2767 ipa_adjust_ra_buff_base_sz(
2768 in->ipa_ep_cfg.aggr.
2769 aggr_byte_limit));
2770 in->ipa_ep_cfg.aggr.
2771 aggr_byte_limit =
2772 sys->rx_buff_sz < in->
2773 ipa_ep_cfg.aggr.
2774 aggr_byte_limit ?
2775 IPA_ADJUST_AGGR_BYTE_LIMIT(
2776 sys->rx_buff_sz) :
2777 IPA_ADJUST_AGGR_BYTE_LIMIT(
2778 in->ipa_ep_cfg.
2779 aggr.aggr_byte_limit);
2780 IPAERR("set aggr_limit %lu\n",
2781 (unsigned long int)
2782 in->ipa_ep_cfg.aggr.
2783 aggr_byte_limit);
2784 } else {
2785 in->ipa_ep_cfg.aggr.
2786 aggr_byte_limit =
2787 IPA_GENERIC_AGGR_BYTE_LIMIT;
2788 in->ipa_ep_cfg.aggr.
2789 aggr_pkt_limit =
2790 IPA_GENERIC_AGGR_PKT_LIMIT;
2791 }
2792 }
2793 } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
2794 IPADBG("assigning policy to client:%d",
2795 in->client);
2796
2797 sys->policy = IPA_POLICY_INTR_POLL_MODE;
Amir Levy9659e592016-10-27 18:08:27 +03002798 INIT_WORK(&sys->work, ipa3_wq_handle_rx);
2799 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
2800 ipa3_switch_to_intr_rx_work_func);
2801 INIT_DELAYED_WORK(&sys->replenish_rx_work,
2802 ipa3_replenish_rx_work_func);
2803 atomic_set(&sys->curr_polling_state, 0);
2804 sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
Amir Levya59ed3f2017-03-05 17:30:55 +02002805 sys->rx_pool_sz = in->desc_fifo_sz /
2806 IPA_FIFO_ELEMENT_SIZE - 1;
Amir Levy9659e592016-10-27 18:08:27 +03002807 if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
2808 sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
2809 sys->pyld_hdlr = NULL;
2810 sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
2811 sys->get_skb = ipa3_get_skb_ipa_rx;
2812 sys->free_skb = ipa3_free_skb_rx;
2813 sys->free_rx_wrapper = ipa3_free_rx_wrapper;
2814 in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
2815 } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
2816 IPADBG("assigning policy to client:%d",
2817 in->client);
2818
2819 sys->policy = IPA_POLICY_INTR_POLL_MODE;
Amir Levy9659e592016-10-27 18:08:27 +03002820 INIT_WORK(&sys->work, ipa3_wq_handle_rx);
2821 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
Amir Levya59ed3f2017-03-05 17:30:55 +02002822 ipa3_switch_to_intr_rx_work_func);
Amir Levy9659e592016-10-27 18:08:27 +03002823 INIT_DELAYED_WORK(&sys->replenish_rx_work,
2824 ipa3_replenish_rx_work_func);
2825 atomic_set(&sys->curr_polling_state, 0);
2826 sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
2827 sys->rx_pool_sz = in->desc_fifo_sz /
Amir Levya59ed3f2017-03-05 17:30:55 +02002828 IPA_FIFO_ELEMENT_SIZE - 1;
Amir Levy9659e592016-10-27 18:08:27 +03002829 if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
2830 sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
2831 sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
2832 sys->get_skb = ipa3_get_skb_ipa_rx;
2833 sys->free_skb = ipa3_free_skb_rx;
2834 sys->free_rx_wrapper = ipa3_free_rx_wrapper;
2835 sys->repl_hdlr = ipa3_replenish_rx_cache;
2836 } else if (in->client ==
2837 IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
2838 IPADBG("assigning policy to client:%d",
2839 in->client);
2840
2841 sys->policy = IPA_POLICY_INTR_POLL_MODE;
Amir Levy9659e592016-10-27 18:08:27 +03002842 INIT_WORK(&sys->work, ipa3_wq_handle_rx);
2843 INIT_DELAYED_WORK(&sys->switch_to_intr_work,
2844 ipa3_switch_to_intr_rx_work_func);
2845 } else if (in->client ==
2846 IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
2847 IPADBG("assigning policy to client:%d",
2848 in->client);
2849
2850 sys->policy = IPA_POLICY_NOINTR_MODE;
Amir Levy9659e592016-10-27 18:08:27 +03002851 } else {
2852 IPAERR("Need to install a RX pipe hdlr\n");
2853 WARN_ON(1);
2854 return -EINVAL;
2855 }
2856 }
2857
2858 return 0;
2859}
2860
2861/**
2862 * ipa3_tx_client_rx_notify_release() - Callback function
2863 * which calls the user supplied callback function to
2864 * release the skb and replenishes the available fifo
2865 * descriptor count
2866 *
2867 * @user1: [in] - Data Descriptor
2868 * @user2: [in] - endpoint idx
2869 *
2870 * This notified callback is for the destination client
2871 * This function is supplied in ipa3_tx_dp_mul
2872 */
2873static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
2874{
2875 struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
2876 int ep_idx = user2;
2877
2878 IPADBG_LOW("Received data desc anchor:%p\n", dd);
2879
2880 atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
2881 ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
2882
2883 /* wlan host driver waits till tx complete before unload */
2884 IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
2885 ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
2886 IPADBG_LOW("calling client notify callback with priv:%p\n",
2887 ipa3_ctx->ep[ep_idx].priv);
2888
2889 if (ipa3_ctx->ep[ep_idx].client_notify) {
2890 ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
2891 IPA_WRITE_DONE, (unsigned long)user1);
2892 ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
2893 }
2894}
2895/**
2896 * ipa3_tx_client_rx_pkt_status() - Callback function
2897 * which increases the available fifo descriptor count
2898 * upon receiving a packet status notification
2899 *
2900 * @user1: [in] - Data Descriptor
2901 * @user2: [in] - endpoint idx
2902 *
2903 * This notified callback is for the destination client
2904 * This function is supplied in ipa3_tx_dp_mul
2905 */
2906static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
2907{
2908 int ep_idx = user2;
2909
2910 atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
2911 ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
2912}
2913
2914
2915/**
2916 * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
2917 * @src: [in] - Client that is sending data
2918 * @ipa_tx_data_desc: [in] data descriptors from wlan
2919 *
2920 * This is used to transfer data descriptors received from the
2921 * WLAN1_PROD pipe to the IPA HW.
2922 *
2923 * The function sends the data descriptors from WLAN1_PROD one at
2924 * a time and sets the EOT flag on the last descriptor. Once the send is done
2925 * from the transport point-of-view, the IPA driver gets notified by the
2926 * supplied callback - ipa_gsi_irq_tx_notify_cb()
Amir Levy9659e592016-10-27 18:08:27 +03002927 *
Amir Levya59ed3f2017-03-05 17:30:55 +02002928 * ipa_gsi_irq_tx_notify_cb will call to the user supplied callback
Amir Levy9659e592016-10-27 18:08:27 +03002929 *
2930 * Returns: 0 on success, negative on failure
2931 */
2932int ipa3_tx_dp_mul(enum ipa_client_type src,
2933 struct ipa_tx_data_desc *data_desc)
2934{
2935 /* The second byte in wlan header holds qmap id */
2936#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
2937 struct ipa_tx_data_desc *entry;
2938 struct ipa3_sys_context *sys;
2939 struct ipa3_desc desc[2];
2940 u32 num_desc, cnt;
2941 int ep_idx;
2942
2943 IPADBG_LOW("Received data desc anchor:%p\n", data_desc);
2944
2945 spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
2946
2947 ep_idx = ipa3_get_ep_mapping(src);
2948 if (unlikely(ep_idx == -1)) {
2949		IPAERR("src EP does not exist.\n");
2950 goto fail_send;
2951 }
2952 IPADBG_LOW("ep idx:%d\n", ep_idx);
2953 sys = ipa3_ctx->ep[ep_idx].sys;
2954
2955 if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
2956		IPAERR("src EP not valid.\n");
2957 goto fail_send;
2958 }
2959 sys->ep->wstats.rx_hd_rcvd++;
2960
2961 /* Calculate the number of descriptors */
2962 num_desc = 0;
2963 list_for_each_entry(entry, &data_desc->link, link) {
2964 num_desc++;
2965 }
2966 IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
2967
2968 if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
2969 IPAERR("Insufficient data descriptors available\n");
2970 goto fail_send;
2971 }
2972
2973 /* Assign callback only for last data descriptor */
2974 cnt = 0;
2975 list_for_each_entry(entry, &data_desc->link, link) {
2976 memset(desc, 0, 2 * sizeof(struct ipa3_desc));
2977
2978 IPADBG_LOW("Parsing data desc :%d\n", cnt);
2979 cnt++;
2980 ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
2981 (u8)sys->ep->cfg.meta.qmap_id;
2982
2983 /* the tag field will be populated in ipa3_send() function */
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002984 desc[0].is_tag_status = true;
Amir Levy9659e592016-10-27 18:08:27 +03002985 desc[1].pyld = entry->pyld_buffer;
2986 desc[1].len = entry->pyld_len;
2987 desc[1].type = IPA_DATA_DESC_SKB;
2988 desc[1].user1 = data_desc;
2989 desc[1].user2 = ep_idx;
2990 IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
2991 entry->priv, desc[1].pyld, desc[1].len);
2992
2993 /* In case of last descriptor populate callback */
2994 if (cnt == num_desc) {
2995 IPADBG_LOW("data desc:%p\n", data_desc);
2996 desc[1].callback = ipa3_tx_client_rx_notify_release;
2997 } else {
2998 desc[1].callback = ipa3_tx_client_rx_pkt_status;
2999 }
3000
3001		IPADBG_LOW("calling ipa3_send()\n");
3002 if (ipa3_send(sys, 2, desc, true)) {
3003 IPAERR("fail to send skb\n");
3004 sys->ep->wstats.rx_pkt_leak += (cnt-1);
3005 sys->ep->wstats.rx_dp_fail++;
3006 goto fail_send;
3007 }
3008
3009 if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
3010 atomic_dec(&sys->ep->avail_fifo_desc);
3011
3012 sys->ep->wstats.rx_pkts_rcvd++;
3013 IPADBG_LOW("ep=%d fifo desc=%d\n",
3014 ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
3015 }
3016
3017 sys->ep->wstats.rx_hd_processed++;
3018 spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
3019 return 0;
3020
3021fail_send:
3022 spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
3023 return -EFAULT;
3024
3025}
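/*
 * Hypothetical usage sketch (not taken from a real client driver): a
 * WLAN driver would chain ipa_tx_data_desc entries behind an anchor
 * descriptor and pass the anchor to ipa3_tx_dp_mul(). Only the fields
 * dereferenced above (link, pyld_buffer, pyld_len, priv) are assumed:
 *
 *	struct ipa_tx_data_desc anchor, d;
 *
 *	INIT_LIST_HEAD(&anchor.link);
 *	d.pyld_buffer = skb->data;
 *	d.pyld_len = skb->len;
 *	list_add_tail(&d.link, &anchor.link);
 *	if (ipa3_tx_dp_mul(IPA_CLIENT_WLAN1_PROD, &anchor))
 *		;	// handle the error, e.g. requeue or free the skb
 *
 * A real client would allocate the descriptors dynamically and free
 * them from its IPA_WRITE_DONE callback, since they are referenced
 * asynchronously by ipa3_tx_client_rx_notify_release().
 */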
3026
3027void ipa3_free_skb(struct ipa_rx_data *data)
3028{
3029 struct ipa3_rx_pkt_wrapper *rx_pkt;
3030
3031 spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
3032
3033 ipa3_ctx->wc_memb.total_tx_pkts_freed++;
3034 rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);
3035
3036 ipa3_skb_recycle(rx_pkt->data.skb);
3037 (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
3038
3039 list_add_tail(&rx_pkt->link,
3040 &ipa3_ctx->wc_memb.wlan_comm_desc_list);
3041 ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
3042
3043 spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
3044}
3045
3046/* Functions added to support kernel tests */
3047
3048int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
Amir Levya59ed3f2017-03-05 17:30:55 +02003049 unsigned long *ipa_transport_hdl,
Amir Levy9659e592016-10-27 18:08:27 +03003050 u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
3051{
3052 struct ipa3_ep_context *ep;
3053 int ipa_ep_idx;
3054 int result = -EINVAL;
3055
3056 if (sys_in == NULL || clnt_hdl == NULL) {
3057 IPAERR("NULL args\n");
3058 goto fail_gen;
3059 }
3060
Amir Levya59ed3f2017-03-05 17:30:55 +02003061 if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) {
Amir Levy9659e592016-10-27 18:08:27 +03003062 IPAERR("NULL args\n");
3063 goto fail_gen;
3064 }
3065 if (sys_in->client >= IPA_CLIENT_MAX) {
3066 IPAERR("bad parm client:%d\n", sys_in->client);
3067 goto fail_gen;
3068 }
3069
3070 ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
3071 if (ipa_ep_idx == -1) {
3072 IPAERR("Invalid client :%d\n", sys_in->client);
3073 goto fail_gen;
3074 }
3075
3076 ep = &ipa3_ctx->ep[ipa_ep_idx];
3077 IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
3078
3079 if (ep->valid == 1) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003080 if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
Amir Levy9659e592016-10-27 18:08:27 +03003081 IPAERR("EP %d already allocated\n", ipa_ep_idx);
3082 goto fail_and_disable_clocks;
3083 } else {
3084 if (ipa3_cfg_ep_hdr(ipa_ep_idx,
3085 &sys_in->ipa_ep_cfg.hdr)) {
3086 IPAERR("fail to configure hdr prop of EP %d\n",
3087 ipa_ep_idx);
3088 result = -EFAULT;
3089 goto fail_and_disable_clocks;
3090 }
3091 if (ipa3_cfg_ep_cfg(ipa_ep_idx,
3092 &sys_in->ipa_ep_cfg.cfg)) {
3093 IPAERR("fail to configure cfg prop of EP %d\n",
3094 ipa_ep_idx);
3095 result = -EFAULT;
3096 goto fail_and_disable_clocks;
3097 }
3098 IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
3099 sys_in->client, ipa_ep_idx, ep->sys);
3100 ep->client_notify = sys_in->notify;
3101 ep->priv = sys_in->priv;
3102 *clnt_hdl = ipa_ep_idx;
3103 if (!ep->keep_ipa_awake)
3104 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3105
3106 return 0;
3107 }
3108 }
3109
3110 memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
3111
3112 ep->valid = 1;
3113 ep->client = sys_in->client;
3114 ep->client_notify = sys_in->notify;
3115 ep->priv = sys_in->priv;
3116 ep->keep_ipa_awake = true;
3117 if (en_status) {
3118 ep->status.status_en = true;
3119 ep->status.status_ep = ipa_ep_idx;
3120 }
3121
3122 result = ipa3_enable_data_path(ipa_ep_idx);
3123 if (result) {
3124 IPAERR("enable data path failed res=%d clnt=%d.\n",
3125 result, ipa_ep_idx);
3126 goto fail_gen2;
3127 }
3128
3129 if (!ep->skip_ep_cfg) {
3130 if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
3131 IPAERR("fail to configure EP.\n");
3132 goto fail_gen2;
3133 }
3134 if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
3135 IPAERR("fail to configure status of EP.\n");
3136 goto fail_gen2;
3137 }
3138 IPADBG("ep configuration successful\n");
3139 } else {
3140 IPADBG("skipping ep configuration\n");
3141 }
3142
3143 *clnt_hdl = ipa_ep_idx;
3144
3145 *ipa_pipe_num = ipa_ep_idx;
Amir Levya59ed3f2017-03-05 17:30:55 +02003146 *ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl;
Amir Levy9659e592016-10-27 18:08:27 +03003147
3148 if (!ep->keep_ipa_awake)
3149 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3150
3151 ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
3152 IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
3153 ipa_ep_idx, ep->sys);
3154
3155 return 0;
3156
3157fail_gen2:
3158fail_and_disable_clocks:
3159 IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
3160fail_gen:
3161 return result;
3162}
3163
3164int ipa3_sys_teardown(u32 clnt_hdl)
3165{
3166 struct ipa3_ep_context *ep;
3167
3168 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
3169 ipa3_ctx->ep[clnt_hdl].valid == 0) {
3170 IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
3171 return -EINVAL;
3172 }
3173
3174 ep = &ipa3_ctx->ep[clnt_hdl];
3175
3176 if (!ep->keep_ipa_awake)
3177 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
3178
3179 ipa3_disable_data_path(clnt_hdl);
3180 ep->valid = 0;
3181
3182 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
3183
3184 IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
3185
3186 return 0;
3187}

int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
		unsigned long gsi_ev_hdl)
{
	struct ipa3_ep_context *ep;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm (either endpoint or client hdl invalid)\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	ep->gsi_chan_hdl = gsi_ch_hdl;
	ep->gsi_evt_ring_hdl = gsi_ev_hdl;

	return 0;
}

static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
}

static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
}

static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt;

	IPADBG_LOW("event %d notified\n", notify->evt_id);

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		tx_pkt = notify->xfer_user_data;
		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
	}
}

static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_sys_context *sys;
	struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
	int clk_off;

	if (!notify) {
		IPAERR("gsi notify is NULL.\n");
		return;
	}
	IPADBG_LOW("event %d notified\n", notify->evt_id);

	sys = (struct ipa3_sys_context *)notify->chan_user_data;
	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
		struct ipa3_rx_pkt_wrapper, link);
	rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data;

	if (rx_pkt_expected != rx_pkt_rcvd) {
		IPAERR("Pkt was not filled in head of rx buffer.\n");
		WARN_ON(1);
		return;
	}
	sys->ep->bytes_xfered_valid = true;
	sys->ep->bytes_xfered = notify->bytes_xfered;
	sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr;

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
	case GSI_CHAN_EVT_EOB:
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		if (!atomic_read(&sys->curr_polling_state)) {
			/* put the gsi channel into polling mode */
			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
				GSI_CHAN_MODE_POLL);
			ipa3_inc_acquire_wakelock();
			atomic_set(&sys->curr_polling_state, 1);
			if (sys->ep->napi_enabled) {
				struct ipa_active_client_logging_info log;

				IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
				clk_off = ipa3_inc_client_enable_clks_no_block(
					&log);
				if (!clk_off)
					sys->ep->client_notify(sys->ep->priv,
						IPA_CLIENT_START_POLL, 0);
				else
					queue_work(sys->wq, &sys->work);
			} else {
				queue_work(sys->wq, &sys->work);
			}
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
	}
}
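
/*
 * The EOT/EOB arm above implements a NAPI-style interrupt-to-polling
 * handoff: the first completion interrupt flips the channel to polling
 * mode, takes a wakelock so harvesting is not lost across suspend, and
 * then either kicks the client's NAPI context (IPA_CLIENT_START_POLL)
 * or falls back to the rx workqueue. A condensed sketch of the pattern
 * (illustrative only, names as used above):
 *
 *	if (!atomic_read(&sys->curr_polling_state)) {
 *		gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
 *			GSI_CHAN_MODE_POLL);
 *		ipa3_inc_acquire_wakelock();
 *		atomic_set(&sys->curr_polling_state, 1);
 *		... hand off to NAPI poll or to the rx workqueue ...
 *	}
 */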

static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_sys_context *sys;
	struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd;

	if (!notify) {
		IPAERR("gsi notify is NULL.\n");
		return;
	}
	IPADBG_LOW("event %d notified\n", notify->evt_id);

	sys = (struct ipa3_sys_context *)notify->chan_user_data;
	if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
		IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
		return;
	}
	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
		struct ipa3_dma_xfer_wrapper, link);
	rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify->xfer_user_data;
	if (rx_pkt_expected != rx_pkt_rcvd) {
		IPAERR("Pkt was not filled in head of rx buffer.\n");
		WARN_ON(1);
		return;
	}

	sys->ep->bytes_xfered_valid = true;
	sys->ep->bytes_xfered = notify->bytes_xfered;
	sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest;

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
		if (!atomic_read(&sys->curr_polling_state)) {
			/* put the gsi channel into polling mode */
			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
				GSI_CHAN_MODE_POLL);
			ipa3_inc_acquire_wakelock();
			atomic_set(&sys->curr_polling_state, 1);
			queue_work(sys->wq, &sys->work);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
	}
}
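
/*
 * IPA_CLIENT_MEMCPY_DMA_SYNC_CONS is rejected by this callback because
 * its channel is switched to polling mode right after
 * gsi_start_channel() in ipa_gsi_setup_channel() below, so its
 * completions are reaped synchronously. A hedged sketch of such a
 * synchronous reap (illustrative, not the driver's actual DMA path):
 *
 *	struct gsi_chan_xfer_notify xfer;
 *	int ret;
 *
 *	do {
 *		ret = gsi_poll_channel(ep->gsi_chan_hdl, &xfer);
 *	} while (ret == GSI_STATUS_POLL_EMPTY);
 */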

int ipa3_alloc_common_event_ring(void)
{
	struct gsi_evt_ring_props gsi_evt_ring_props;
	dma_addr_t evt_dma_addr;
	int result;

	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;

	gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;

	gsi_evt_ring_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev,
		gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
	if (!gsi_evt_ring_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_evt_ring_props.ring_len);
		return -ENOMEM;
	}
	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
	gsi_evt_ring_props.int_modt = 0;
	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
	gsi_evt_ring_props.rp_update_addr = 0;
	gsi_evt_ring_props.exclusive = false;
	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
	gsi_evt_ring_props.user_data = NULL;

	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
		ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
	if (result) {
		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
		return result;
	}
	ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;

	return 0;
}
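
/*
 * Budget accounting note: every endpoint that opts into this common
 * event ring consumes 2 * desc_fifo_sz bytes of gsi_evt_comm_ring_rem
 * (see the check in ipa_gsi_setup_channel() below). Hedged arithmetic
 * example, assuming IPA_COMMON_EVENT_RING_SIZE is 0x7C00 bytes: with a
 * 0x800-byte descriptor FIFO each endpoint takes 0x1000 bytes, so up to
 * 31 such endpoints can share the ring.
 */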

static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep)
{
	struct gsi_evt_ring_props gsi_evt_ring_props;
	struct gsi_chan_props gsi_channel_props;
	union __packed gsi_channel_scratch ch_scratch;
	const struct ipa_gsi_ep_config *gsi_ep_info;
	dma_addr_t dma_addr;
	dma_addr_t evt_dma_addr;
	int result;

	if (!ep) {
		IPAERR("EP context is empty\n");
		return -EINVAL;
	}

	evt_dma_addr = 0;
	ep->gsi_evt_ring_hdl = ~0;
	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
	if (ep->sys->use_comm_evt_ring) {
		if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) {
			IPAERR("not enough space in common event ring\n");
			IPAERR("available: %d needed: %d\n",
				ipa3_ctx->gsi_evt_comm_ring_rem,
				2 * in->desc_fifo_sz);
			WARN_ON(1);
			return -EFAULT;
		}
		ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz);
		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
	} else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
			IPA_CLIENT_IS_CONS(ep->client)) {
		gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
		gsi_evt_ring_props.re_size =
			GSI_EVT_RING_RE_SIZE_16B;

		/*
		 * GSI ring length is calculated based on the desc_fifo_sz
		 * which was meant to define the BAM desc fifo. GSI descriptors
		 * are 16B as opposed to 8B for BAM.
		 */
		gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;

		gsi_evt_ring_props.ring_base_vaddr =
			dma_alloc_coherent(ipa3_ctx->pdev,
			gsi_evt_ring_props.ring_len,
			&evt_dma_addr, GFP_KERNEL);
		if (!gsi_evt_ring_props.ring_base_vaddr) {
			IPAERR("fail to dma alloc %u bytes\n",
				gsi_evt_ring_props.ring_len);
			return -ENOMEM;
		}
		gsi_evt_ring_props.ring_base_addr = evt_dma_addr;

		/* copy mem info */
		ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
		ep->gsi_mem_info.evt_ring_base_addr =
			gsi_evt_ring_props.ring_base_addr;
		ep->gsi_mem_info.evt_ring_base_vaddr =
			gsi_evt_ring_props.ring_base_vaddr;

		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
		gsi_evt_ring_props.int_modc = 1;

		IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
			ep->client,
			gsi_evt_ring_props.int_modt,
			gsi_evt_ring_props.int_modc);
		gsi_evt_ring_props.rp_update_addr = 0;
		gsi_evt_ring_props.exclusive = true;
		gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
		gsi_evt_ring_props.user_data = NULL;

		result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
			ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
		if (result != GSI_STATUS_SUCCESS)
			goto fail_alloc_evt_ring;
	}

	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
	if (IPA_CLIENT_IS_PROD(ep->client)) {
		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	} else {
		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
		gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
	}

	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
	if (!gsi_ep_info) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			ep->client);
		result = -EINVAL;
		goto fail_get_gsi_ep_info;
	} else
		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;

	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;

	/*
	 * GSI ring length is calculated based on the desc_fifo_sz which was
	 * meant to define the BAM desc fifo. GSI descriptors are 16B as
	 * opposed to 8B for BAM. For PROD pipes there is also an additional
	 * descriptor for the TAG STATUS immediate command. APPS_WAN_PROD pipe
	 * is an exception as this pipe does not use TAG STATUS for
	 * completion. Instead it uses event ring based completions.
	 */
	if (ep->client == IPA_CLIENT_APPS_WAN_PROD)
		gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
	else if (IPA_CLIENT_IS_PROD(ep->client))
		gsi_channel_props.ring_len = 4 * in->desc_fifo_sz;
	else
		gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
	gsi_channel_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
			&dma_addr, GFP_KERNEL);
	if (!gsi_channel_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_channel_props.ring_len);
		result = -ENOMEM;
		goto fail_alloc_channel_ring;
	}
	gsi_channel_props.ring_base_addr = dma_addr;

	/* copy mem info */
	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
	ep->gsi_mem_info.chan_ring_base_addr =
		gsi_channel_props.ring_base_addr;
	ep->gsi_mem_info.chan_ring_base_vaddr =
		gsi_channel_props.ring_base_vaddr;

	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
	else
		gsi_channel_props.low_weight = 1;
	gsi_channel_props.chan_user_data = ep->sys;
	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
	if (IPA_CLIENT_IS_PROD(ep->client))
		gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
	else
		gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
	if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
		gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS)
		goto fail_alloc_channel;

	memset(&ch_scratch, 0, sizeof(ch_scratch));
	ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
		GSI_CHAN_RE_SIZE_16B;
	ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to write scratch %d\n", result);
		goto fail_write_channel_scratch;
	}

	result = gsi_start_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS)
		goto fail_start_channel;
	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
		gsi_config_channel_mode(ep->gsi_chan_hdl,
			GSI_CHAN_MODE_POLL);
	return 0;

fail_start_channel:
fail_write_channel_scratch:
	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
		!= GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc GSI chan.\n");
		BUG();
	}
fail_alloc_channel:
	dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
		gsi_channel_props.ring_base_vaddr, dma_addr);
fail_alloc_channel_ring:
fail_get_gsi_ep_info:
	if (ep->gsi_evt_ring_hdl != ~0) {
		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
		ep->gsi_evt_ring_hdl = ~0;
	}
fail_alloc_evt_ring:
	if (gsi_evt_ring_props.ring_base_vaddr)
		dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
			gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
	IPAERR("Return with err: %d\n", result);
	return result;
}
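
/*
 * Worked example for the channel scratch programming above (TLV size
 * hypothetical): with gsi_ep_info->ipa_if_tlv = 16 and 16B ring
 * elements, max_outstanding_tre = 16 * 16 = 256 bytes while
 * outstanding_threshold = 2 * 16 = 32 bytes, i.e. at most one TLV
 * FIFO's worth of TREs is kept in flight and the engine is re-armed two
 * ring elements before the limit.
 */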

static int ipa_populate_tag_field(struct ipa3_desc *desc,
		struct ipa3_tx_pkt_wrapper *tx_pkt,
		struct ipahal_imm_cmd_pyld **tag_pyld_ret)
{
	struct ipahal_imm_cmd_pyld *tag_pyld;
	struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};

	/* populate tag field only if it is NULL */
	if (desc->pyld == NULL) {
		tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
		tag_pyld = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
		if (unlikely(!tag_pyld)) {
			IPAERR("Failed to construct ip_packet_tag_status\n");
			return -EFAULT;
		}
		/*
		 * This is for a 32-bit pointer; it will need special
		 * handling if a 64-bit pointer is used
		 */
		IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
		desc->pyld = tag_pyld->data;
		desc->opcode = tag_pyld->opcode;
		desc->len = tag_pyld->len;
		desc->user1 = tag_pyld;
		desc->type = IPA_IMM_CMD_DESC;
		desc->callback = ipa3_tag_destroy_imm;

		*tag_pyld_ret = tag_pyld;
	}
	return 0;
}

static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
		struct ipa_mem_buffer *mem_info)
{
	int ret;
	struct gsi_chan_xfer_notify xfer_notify;
	struct ipa3_rx_pkt_wrapper *rx_pkt;

	if (sys->ep->bytes_xfered_valid) {
		mem_info->phys_base = sys->ep->phys_base;
		mem_info->size = (u32)sys->ep->bytes_xfered;
		sys->ep->bytes_xfered_valid = false;
		return GSI_STATUS_SUCCESS;
	}

	ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
		&xfer_notify);
	if (ret == GSI_STATUS_POLL_EMPTY)
		return ret;
	else if (ret != GSI_STATUS_SUCCESS) {
		IPAERR("Poll channel err: %d\n", ret);
		return ret;
	}

	rx_pkt = (struct ipa3_rx_pkt_wrapper *)
		xfer_notify.xfer_user_data;
	mem_info->phys_base = rx_pkt->data.dma_addr;
	mem_info->size = xfer_notify.bytes_xfered;

	return ret;
}

/**
 * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
 * function is executed in the softirq context
 *
 * If the input budget is zero, the driver switches back to
 * interrupt mode.
 *
 * Return: number of polled packets, 0 (zero) on error
 */
int ipa3_rx_poll(u32 clnt_hdl, int weight)
{
	struct ipa3_ep_context *ep;
	int ret;
	int cnt = 0;
	struct ipa_mem_buffer mem_info = {0};
	/* NOTE: function-static, so this count is shared by all polled EPs */
	static int total_cnt;
	struct ipa_active_client_logging_info log;

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm 0x%x\n", clnt_hdl);
		return cnt;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	while (cnt < weight &&
			atomic_read(&ep->sys->curr_polling_state)) {

		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
		if (ret)
			break;

		ipa3_wq_rx_common(ep->sys, mem_info.size);
		cnt += IPA_WAN_AGGR_PKT_CNT;
		total_cnt++;

		if (ep->sys->len == 0 || total_cnt >= ep->sys->rx_pool_sz) {
			total_cnt = 0;
			cnt--;
			break;
		}
	}

	if (cnt < weight) {
		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
		ipa3_rx_switch_to_intr_mode(ep->sys);
		ipa3_dec_client_disable_clks_no_block(&log);
	}

	return cnt;
}
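
/*
 * Hedged usage sketch (hypothetical, not from this driver): a NAPI poll
 * handler delegating to ipa3_rx_poll(). Note that when fewer packets
 * than the weight were polled, ipa3_rx_poll() itself signals
 * IPA_CLIENT_COMP_NAPI through client_notify, and the consumer is
 * expected to complete NAPI from that notification rather than in the
 * poll handler itself.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		return ipa3_rx_poll(my_clnt_hdl, budget);
 *	}
 */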

static unsigned long tag_to_pointer_wa(uint64_t tag)
{
	return 0xFFFF000000000000 | (unsigned long) tag;
}

static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	u16 temp;
	/* Add this check even though it may cost some throughput */
	if (ipa3_is_msm_device()) {
		temp = (u16) (~((unsigned long) tx_pkt &
			0xFFFF000000000000) >> 48);
		if (temp) {
			IPAERR("The 16-bit prefix is not all 1s (%p)\n",
				tx_pkt);
			BUG();
		}
	}
	return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
}
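
/*
 * Worked example of the tag workaround round trip (pointer value
 * hypothetical): kernel pointers on the targeted MSM devices carry
 * all-ones top 16 bits, which are dropped to fit the 48-bit tag and
 * restored on the way back:
 *
 *	tx_pkt              = 0xFFFFFFC012345678
 *	pointer_to_tag_wa() = 0x0000FFC012345678
 *	tag_to_pointer_wa() = 0xFFFFFFC012345678
 */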

/**
 * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
 *
 * A hardware limitation requires GSI physical channel 20 to be avoided.
 * This function allocates GSI physical channel 20 and holds it to
 * prevent any other entity from using it.
 *
 * Return codes: 0 on success, negative on failure
 */
int ipa_gsi_ch20_wa(void)
{
	struct gsi_chan_props gsi_channel_props;
	dma_addr_t dma_addr;
	int result;
	int i;
	unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
	unsigned long chan_hdl_to_keep;

	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	gsi_channel_props.evt_ring_hdl = ~0;
	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
	gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
	gsi_channel_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
		&dma_addr, 0);
	if (!gsi_channel_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_channel_props.ring_len);
		return -ENOMEM;
	}
	gsi_channel_props.ring_base_addr = dma_addr;
	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	gsi_channel_props.low_weight = 1;
	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
	gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;

	/* first allocate channels up to channel 20 */
	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
		gsi_channel_props.ch_id = i;
		result = gsi_alloc_channel(&gsi_channel_props,
			ipa3_ctx->gsi_dev_hdl,
			&chan_hdl[i]);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("failed to alloc channel %d err %d\n",
				i, result);
			return result;
		}
	}

	/* allocate channel 20 */
	gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
		&chan_hdl_to_keep);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to alloc channel %d err %d\n",
			i, result);
		return result;
	}

	/* release all other channels */
	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
		result = gsi_dealloc_channel(chan_hdl[i]);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("failed to dealloc channel %d err %d\n",
				i, result);
			return result;
		}
	}

	/* DMA memory shall not be freed as it is used by channel 20 */
	return 0;
}

/**
 * ipa_adjust_ra_buff_base_sz()
 *
 * Return value: the largest power of two that is smaller than the
 * adjusted limit, i.e. than aggr_byte_limit plus one MTU plus the
 * generic RX buffer overhead
 */
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
{
	aggr_byte_limit += IPA_MTU;
	aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
	/* round up to the next power of two, then halve it */
	aggr_byte_limit--;
	aggr_byte_limit |= aggr_byte_limit >> 1;
	aggr_byte_limit |= aggr_byte_limit >> 2;
	aggr_byte_limit |= aggr_byte_limit >> 4;
	aggr_byte_limit |= aggr_byte_limit >> 8;
	aggr_byte_limit |= aggr_byte_limit >> 16;
	aggr_byte_limit++;
	return aggr_byte_limit >> 1;
}
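
/*
 * Worked example (IPA_GENERIC_RX_BUFF_LIMIT value assumed): for
 * aggr_byte_limit = 6144, IPA_MTU = 1500 and an overhead of 324 bytes,
 * the adjusted value is 7968; the shift-or cascade rounds it up to the
 * next power of two (8192) and the final shift returns 4096, the
 * largest power of two below the adjusted limit.
 */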