/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include "ipa_i.h"

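/*
 * Note: later upstream kernels provide list_next_entry() in <linux/list.h>;
 * this tree does not, so an equivalent helper is defined locally here.
 */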
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)
#define IPA_LAST_DESC_CNT 0xFFFF
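/*
 * Rx polling parameters: ipa_handle_rx() sleeps for
 * usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP) microseconds after each
 * empty polling cycle, and reverts to interrupt mode once POLLING_INACTIVITY
 * consecutive cycles have returned no packets.
 */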
#define POLLING_INACTIVITY 40
#define POLLING_MIN_SLEEP 950
#define POLLING_MAX_SLEEP 1050

static void replenish_rx_work_func(struct work_struct *work);
static struct delayed_work replenish_rx_work;
static void switch_to_intr_work_func(struct work_struct *work);
static struct delayed_work switch_to_intr_work;
static void ipa_wq_handle_rx(struct work_struct *work);
static DECLARE_WORK(rx_work, ipa_wq_handle_rx);

/**
 * ipa_wq_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work: work_struct used by the work queue
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that the completion order matches
 *   the order in which the packets were sent
 * - delete all the tx packet descriptors from the system pipe context
 *   (no longer needed)
 * - return the tx buffer back to one_kb_no_straddle_pool
 */
void ipa_wq_write_done(struct work_struct *work)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	struct ipa_tx_pkt_wrapper *next_pkt;
	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
	unsigned long irq_flags;
	struct ipa_mem_buffer mult = { 0 };
	int i;
	u32 cnt;

	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
	cnt = tx_pkt->cnt;
	IPADBG("cnt=%d\n", cnt);

	if (unlikely(cnt == 0))
		WARN_ON(1);

	if (cnt > 1 && cnt != IPA_LAST_DESC_CNT)
		mult = tx_pkt->mult;

	for (i = 0; i < cnt; i++) {
		if (unlikely(tx_pkt == NULL))
			WARN_ON(1);
		spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
		tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
						   struct ipa_tx_pkt_wrapper,
						   link);
		if (unlikely(tx_pkt != tx_pkt_expected)) {
			spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
					       irq_flags);
			WARN_ON(1);
		}
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
			dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
				      tx_pkt->bounce,
				      tx_pkt->mem.phys_base);
		} else {
			dma_unmap_single(NULL, tx_pkt->mem.phys_base,
					 tx_pkt->mem.size,
					 DMA_TO_DEVICE);
		}

		if (tx_pkt->callback)
			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);

		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}

	if (mult.phys_base)
		dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
}

/**
 * ipa_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether the caller is in atomic context
 *
 * - Allocate a tx_pkt wrapper
 * - Allocate a bounce buffer due to HW constraints
 *   (this buffer will be used for the DMA command)
 * - Copy the data (desc->pyld) to the bounce buffer
 * - Transfer the data to the IPA
 * - After the transfer is done, SPS notifies the sending user via
 *   ipa_sps_irq_comp_tx()
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
		 bool in_atomic)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	unsigned long irq_flags;
	int result;
	u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
	dma_addr_t dma_address;
	u16 len;
	u32 mem_flag = GFP_ATOMIC;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
	if (!tx_pkt) {
		IPAERR("failed to alloc tx wrapper\n");
		goto fail_mem_alloc;
	}

	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
		WARN_ON(desc->len > 512);

		/*
		 * Due to a HW limitation, we need to make sure that the packet
		 * does not cross a 1KB boundary
		 */
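		/*
		 * For example, a 512-byte payload mapped at offset 0x380
		 * would span 0x380-0x57F and cross the 1KB boundary at
		 * 0x400; the dedicated pool below only returns buffers
		 * that cannot straddle such a boundary.
		 */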
		tx_pkt->bounce = dma_pool_alloc(
					ipa_ctx->one_kb_no_straddle_pool,
					mem_flag, &dma_address);
		if (!tx_pkt->bounce) {
			dma_address = 0;
		} else {
			WARN_ON(!ipa_straddle_boundary
				((u32)dma_address,
				(u32)dma_address + desc->len - 1,
				1024));
			memcpy(tx_pkt->bounce, desc->pyld, desc->len);
		}
	} else {
		dma_address = dma_map_single(NULL, desc->pyld, desc->len,
					     DMA_TO_DEVICE);
	}
	if (!dma_address) {
		IPAERR("failed to DMA wrap\n");
		goto fail_dma_map;
	}

	INIT_LIST_HEAD(&tx_pkt->link);
	INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
	tx_pkt->type = desc->type;
	tx_pkt->cnt = 1; /* only 1 desc in this "set" */

	tx_pkt->mem.phys_base = dma_address;
	tx_pkt->mem.base = desc->pyld;
	tx_pkt->mem.size = desc->len;
	tx_pkt->sys = sys;
	tx_pkt->callback = desc->callback;
	tx_pkt->user1 = desc->user1;
	tx_pkt->user2 = desc->user2;

	/*
	 * Special treatment for immediate commands, where the structure of the
	 * descriptor is different
	 */
	if (desc->type == IPA_IMM_CMD_DESC) {
		sps_flags |= SPS_IOVEC_FLAG_IMME;
		len = desc->opcode;
		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
		       desc->opcode, desc->len, sps_flags);
		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
	} else {
		len = desc->len;
	}

	spin_lock_irqsave(&sys->spinlock, irq_flags);
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
				  sps_flags);
	if (result) {
		IPAERR("sps_transfer_one failed rc=%d\n", result);
		goto fail_sps_send;
	}

	spin_unlock_irqrestore(&sys->spinlock, irq_flags);

	return 0;

fail_sps_send:
	list_del(&tx_pkt->link);
	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
		dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
			      dma_address);
	else
		dma_unmap_single(NULL, dma_address, desc->len, DMA_TO_DEVICE);
fail_dma_map:
	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
fail_mem_alloc:
	return -EFAULT;
}
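
/*
 * Illustrative sketch (not an in-tree caller): sending one data descriptor
 * from non-atomic context. The payload, callback, and pipe choice below are
 * assumptions made for the sake of the example.
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.pyld = skb->data;
 *	desc.len = skb->len;
 *	desc.type = IPA_DATA_DESC_SKB;
 *	desc.callback = my_tx_done;	// hypothetical client callback
 *	desc.user1 = skb;
 *	if (ipa_send_one(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], &desc, false))
 *		// -EFAULT: the caller still owns skb and must free it
 */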

/**
 * ipa_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate commands or data)
 * @in_atomic: whether the caller is in atomic context
 *
 * This function is used for system-to-BAM connections.
 * - The SPS driver expects a struct sps_transfer that carries all the data
 *   for one transaction
 * - The sps_transfer struct points to bounce buffers for
 *   its DMA command (immediate command and data)
 * - An ipa_tx_pkt_wrapper is used for each IPA
 *   descriptor (allocated from the wrappers cache)
 * - The wrapper struct is configured for each ipa-desc payload and holds
 *   information that is later used by the user callbacks
 * - Each transfer is issued by calling sps_transfer()
 * - Each packet (command or data) that is sent is also saved in the
 *   ipa_sys_context so that it can later be verified that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
	     bool in_atomic)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	struct ipa_tx_pkt_wrapper *next_pkt;
	struct sps_transfer transfer = { 0 };
	struct sps_iovec *iovec;
	unsigned long irq_flags;
	dma_addr_t dma_addr;
	int i = 0;
	int j;
	int result;
	int fail_dma_wrap = 0;
	uint size = num_desc * sizeof(struct sps_iovec);
	u32 mem_flag = GFP_ATOMIC;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, 0);
	transfer.iovec_phys = dma_addr;
	transfer.iovec_count = num_desc;
	spin_lock_irqsave(&sys->spinlock, irq_flags);
	if (!transfer.iovec) {
		IPAERR("fail to alloc DMA mem for sps xfr buff\n");
		goto failure_coherent;
	}

	for (i = 0; i < num_desc; i++) {
		fail_dma_wrap = 0;
		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
					   mem_flag);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			goto failure;
		}
		/*
		 * first desc of set is "special" as it holds the count and
		 * other info
		 */
		if (i == 0) {
			transfer.user = tx_pkt;
			tx_pkt->mult.phys_base = dma_addr;
			tx_pkt->mult.base = transfer.iovec;
			tx_pkt->mult.size = size;
			tx_pkt->cnt = num_desc;
			INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
		}

		iovec = &transfer.iovec[i];
		iovec->flags = 0;

		INIT_LIST_HEAD(&tx_pkt->link);
		tx_pkt->type = desc[i].type;

		tx_pkt->mem.base = desc[i].pyld;
		tx_pkt->mem.size = desc[i].len;

		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
			WARN_ON(tx_pkt->mem.size > 512);

			/*
			 * Due to a HW limitation, we need to make sure that the
			 * packet does not cross a 1KB boundary
			 */
			tx_pkt->bounce =
			   dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
					  mem_flag,
					  &tx_pkt->mem.phys_base);
			if (!tx_pkt->bounce) {
				tx_pkt->mem.phys_base = 0;
			} else {
				WARN_ON(!ipa_straddle_boundary(
					(u32)tx_pkt->mem.phys_base,
					(u32)tx_pkt->mem.phys_base +
					tx_pkt->mem.size - 1, 1024));
				memcpy(tx_pkt->bounce, tx_pkt->mem.base,
				       tx_pkt->mem.size);
			}
		} else {
			tx_pkt->mem.phys_base =
			   dma_map_single(NULL, tx_pkt->mem.base,
					  tx_pkt->mem.size,
					  DMA_TO_DEVICE);
		}
		if (!tx_pkt->mem.phys_base) {
			IPAERR("failed to alloc tx wrapper\n");
			fail_dma_wrap = 1;
			goto failure;
		}

		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;

		/*
		 * Point the iovec to the bounce buffer and
		 * add this packet to system pipe context.
		 */
		iovec->addr = tx_pkt->mem.phys_base;
		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		/*
		 * Special treatment for immediate commands, where the structure
		 * of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			iovec->size = desc[i].opcode;
			iovec->flags |= SPS_IOVEC_FLAG_IMME;
		} else {
			iovec->size = desc[i].len;
		}

		if (i == (num_desc - 1)) {
			iovec->flags |= (SPS_IOVEC_FLAG_EOT |
					SPS_IOVEC_FLAG_INT);
			/* "mark" the last desc */
			tx_pkt->cnt = IPA_LAST_DESC_CNT;
		}
	}

	result = sps_transfer(sys->ep->ep_hdl, &transfer);
	if (result) {
		IPAERR("sps_transfer failed rc=%d\n", result);
		goto failure;
	}

	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
	return 0;

failure:
	tx_pkt = transfer.user;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
			dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
				      tx_pkt->bounce,
				      tx_pkt->mem.phys_base);
		else
			dma_unmap_single(NULL, tx_pkt->mem.phys_base,
					 tx_pkt->mem.size,
					 DMA_TO_DEVICE);
		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
	if (i < num_desc)
		/* last desc failed */
		if (fail_dma_wrap)
			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
	if (transfer.iovec_phys)
		dma_free_coherent(NULL, size, transfer.iovec,
				  transfer.iovec_phys);
failure_coherent:
	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
	return -EFAULT;
}
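
/*
 * For an in-tree example of a multi-descriptor send, see ipa_tx_dp() below:
 * it chains an IP_PACKET_INIT immediate command (desc[0]) with a data
 * descriptor (desc[1]) and submits both via ipa_send(sys, 2, desc, true).
 */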

/**
 * ipa_sps_irq_cmd_ack - callback function which will be called by the SPS
 * driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: (unused)
 *
 * Complete the immediate command's completion object; this releases the
 * thread which waits on this completion object (ipa_send_cmd())
 */
static void ipa_sps_irq_cmd_ack(void *user1, void *user2)
{
	struct ipa_desc *desc = (struct ipa_desc *)user1;

	if (!desc)
		WARN_ON(1);
	IPADBG("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}

/**
 * ipa_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the descr array
 * @descr: descriptor structure
 *
 * This function blocks until the command is ACKed by the IPA HW; the caller
 * must free any resources it allocated after the function returns.
 * The callback in ipa_desc should not be set by the caller
 * for this function.
 */
int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
{
	struct ipa_desc *desc;
	int result = 0;

	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
		if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
			ipa_enable_clks();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa_sps_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr, false)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa_sps_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc,
			     descr, false)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

	IPA_STATS_INC_IC_CNT(num_desc, descr, ipa_ctx->stats.imm_cmds);
bail:
	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
		if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
			ipa_disable_clks();
	return result;
}
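
/*
 * Illustrative sketch (assumptions marked): issuing one immediate command and
 * blocking for its ACK. Real callers build the payload from one of the
 * ipa_ip_* command structures; "cmd" below is a hypothetical, DMA-safe
 * payload buffer.
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.opcode = IPA_IP_PACKET_INIT;	// any immediate command opcode
 *	desc.pyld = cmd;
 *	desc.len = sizeof(*cmd);
 *	desc.type = IPA_IMM_CMD_DESC;
 *	// desc.callback/user1 stay NULL; ipa_send_cmd() fills them in
 *	if (ipa_send_cmd(1, &desc))
 *		// -EFAULT: command was not ACKed
 */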

/**
 * ipa_sps_irq_tx_notify() - Callback function which will be called by
 * the SPS driver after a Tx operation is complete.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 * The event is later handled by ipa_wq_write_done().
 */
static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		tx_pkt = notify->data.transfer.user;
		queue_work(ipa_ctx->tx_wq, &tx_pkt->work);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

/**
 * ipa_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, making it non-DMAable
 * - Free the packet wrapper back to its cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
int ipa_handle_rx_core(bool process_all, bool in_poll_state)
{
	struct ipa_a5_mux_hdr *mux_hdr;
	struct ipa_rx_pkt_wrapper *rx_pkt;
	struct sk_buff *rx_skb;
	struct sps_iovec iov;
	unsigned int pull_len;
	unsigned int padding;
	int ret;
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
	struct ipa_ep_context *ep;
	int cnt = 0;
	struct completion *compl;
	struct ipa_tree_node *node;

	while ((in_poll_state ? atomic_read(&ipa_ctx->curr_polling_state) :
				!atomic_read(&ipa_ctx->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
		if (ret) {
			IPAERR("sps_get_iovec failed %d\n", ret);
			break;
		}

		if (iov.addr == 0)
			break;

		if (unlikely(list_empty(&sys->head_desc_list)))
			continue;

		rx_pkt = list_first_entry(&sys->head_desc_list,
					  struct ipa_rx_pkt_wrapper, link);

		rx_pkt->len = iov.size;
		sys->len--;
		list_del(&rx_pkt->link);

		IPADBG("--curr_cnt=%d\n", sys->len);

		rx_skb = rx_pkt->skb;
		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
				 DMA_FROM_DEVICE);

		/*
		 * make it look like a real skb, "data" was already set at
		 * alloc time
		 */
		rx_skb->tail = rx_skb->data + rx_pkt->len;
		rx_skb->len = rx_pkt->len;
		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);

		mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;

		IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
		       rx_skb->len, ntohs(mux_hdr->interface_id),
		       mux_hdr->src_pipe_index,
		       mux_hdr->flags, ntohl(mux_hdr->metadata));

		IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);

		IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
		IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);

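		/*
		 * A TAG-status exception frame carries, right after the MUX
		 * header, the 32-bit value of a completion pointer that the
		 * driver embedded in the TAG command; look it up in tag_tree
		 * and release the waiter.
		 */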
		if (unlikely(mux_hdr->flags & IPA_A5_MUX_HDR_EXCP_FLAG_TAG)) {
			if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) {
				/* retrieve the compl object from tag value */
				mux_hdr++;
				compl = (struct completion *)
					ntohl(*((u32 *)mux_hdr));
				IPADBG("%x %x %p\n", *(u32 *)mux_hdr,
				       *((u32 *)mux_hdr + 1), compl);

				mutex_lock(&ipa_ctx->lock);
				node = ipa_search(&ipa_ctx->tag_tree,
						  (u32)compl);
				if (node) {
					complete_all(compl);
					rb_erase(&node->node,
						 &ipa_ctx->tag_tree);
					kmem_cache_free(
						ipa_ctx->tree_node_cache, node);
				} else {
					WARN_ON(1);
				}
				mutex_unlock(&ipa_ctx->lock);
			}
			dev_kfree_skb(rx_skb);
			ipa_replenish_rx_cache();
			++cnt;
			continue;
		}

		if (unlikely(mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
		    !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
		    !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify)) {
			IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
			       mux_hdr->src_pipe_index,
			       ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
			       ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify);
			dev_kfree_skb(rx_skb);
			ipa_replenish_rx_cache();
			++cnt;
			continue;
		}

		ep = &ipa_ctx->ep[mux_hdr->src_pipe_index];
		pull_len = sizeof(struct ipa_a5_mux_hdr);

		/*
		 * IP packet starts on word boundary
		 * remove the MUX header and any padding and pass the frame to
		 * the client which registered a rx callback on the "src pipe"
		 */
		padding = ep->cfg.hdr.hdr_len & 0x3;
		if (padding)
			pull_len += 4 - padding;

		IPADBG("pulling %d bytes from skb\n", pull_len);
		skb_pull(rx_skb, pull_len);
		ipa_replenish_rx_cache();
		ep->client_notify(ep->priv, IPA_RECEIVE,
				  (unsigned long)(rx_skb));
		cnt++;
	}

	return cnt;
}
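
/*
 * The Rx loop above consumes the A5 MUX header that the IPA prepends to every
 * frame delivered to the apps processor. A rough sketch of the fields used
 * here, inferred from the accesses above (the authoritative definition is
 * struct ipa_a5_mux_hdr in ipa_i.h):
 *
 *	interface_id   - big-endian u16, logical interface (IID)
 *	src_pipe_index - source pipe, indexes ipa_ctx->ep[]
 *	flags          - exception flags, e.g. IPA_A5_MUX_HDR_EXCP_FLAG_TAG
 *	metadata       - big-endian u32
 */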

/**
 * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static void ipa_rx_switch_to_intr_mode(void)
{
	int ret;
	struct ipa_sys_context *sys;

	IPADBG("Enter");
	if (!atomic_read(&ipa_ctx->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}

	sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];

	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_get_config() failed %d\n", ret);
		goto fail;
	}
	sys->event.options = SPS_O_EOT;
	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
	if (ret) {
		IPAERR("sps_register_event() failed %d\n", ret);
		goto fail;
	}
	sys->ep->connect.options =
		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_set_config() failed %d\n", ret);
		goto fail;
	}
	atomic_set(&ipa_ctx->curr_polling_state, 0);
	ipa_handle_rx_core(true, false);
	return;

fail:
	IPA_STATS_INC_CNT(ipa_ctx->stats.x_intr_repost);
	schedule_delayed_work(&switch_to_intr_work, msecs_to_jiffies(1));
}

/**
 * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
 * driver when a packet is received
 * @notify: SPS driver supplied notification information
 *
 * Called in an interrupt context, therefore the majority of the work is
 * deferred using a work queue.
 *
 * After receiving a packet, the driver goes to polling mode and keeps pulling
 * packets until the rx buffer is empty, then it goes back to interrupt mode.
 * This prevents the CPU from handling too many interrupts when the
 * throughput is high.
 */
static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
	struct ipa_ep_context *ep;
	int ret;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (!atomic_read(&ipa_ctx->curr_polling_state)) {
			ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;

			ret = sps_get_config(ep->ep_hdl, &ep->connect);
			if (ret) {
				IPAERR("sps_get_config() failed %d\n", ret);
				break;
			}
			ep->connect.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(ep->ep_hdl, &ep->connect);
			if (ret) {
				IPAERR("sps_set_config() failed %d\n", ret);
				break;
			}
			atomic_set(&ipa_ctx->curr_polling_state, 1);
			queue_work(ipa_ctx->rx_wq, &rx_work);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

/**
 * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
 * IPA EP configuration
 * @sys_in: [in] input needed to setup BAM pipe and configure EP
 * @clnt_hdl: [out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - call SPS APIs to create a system-to-bam connection with IPA.
 * - allocate descriptor FIFO
 * - register a callback function (ipa_sps_irq_rx_notify or
 *   ipa_sps_irq_tx_notify - depending on the client type) in case the driver
 *   is not configured for polling mode
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	int ipa_ep_idx;
	int sys_idx = -1;
	int result = -EFAULT;
	dma_addr_t dma_addr;

	if (sys_in == NULL || clnt_hdl == NULL ||
	    sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm.\n");
		result = -EINVAL;
		goto fail_bad_param;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client.\n");
		goto fail_bad_param;
	}

	if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
		IPAERR("EP already allocated.\n");
		goto fail_bad_param;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));

	ipa_ctx->ep[ipa_ep_idx].valid = 1;
	ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
	ipa_ctx->ep[ipa_ep_idx].client_notify = sys_in->notify;
	ipa_ctx->ep[ipa_ep_idx].priv = sys_in->priv;

	if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
		IPAERR("fail to configure EP.\n");
		goto fail_sps_api;
	}

	/* Default Config */
	ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();

	if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
		IPAERR("SPS EP allocation failed.\n");
		goto fail_sps_api;
	}

	result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
				&ipa_ctx->ep[ipa_ep_idx].connect);
	if (result) {
		IPAERR("fail to get config.\n");
		goto fail_mem_alloc;
	}

	/* Specific Config */
	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
		ipa_ctx->ep[ipa_ep_idx].connect.destination =
			SPS_DEV_HANDLE_MEM;
		ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
			ipa_ctx->a5_pipe_index++;
		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
		ipa_ctx->ep[ipa_ep_idx].connect.options =
			SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
		if (ipa_ctx->polling_mode)
			ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
	} else {
		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
		ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
		ipa_ctx->ep[ipa_ep_idx].connect.destination =
			ipa_ctx->bam_handle;
		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
			ipa_ctx->a5_pipe_index++;
		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
		ipa_ctx->ep[ipa_ep_idx].connect.options =
			SPS_O_AUTO_ENABLE | SPS_O_EOT;
		if (ipa_ctx->polling_mode)
			ipa_ctx->ep[ipa_ep_idx].connect.options |=
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
	}

	ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
	ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
	   dma_alloc_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
			      &dma_addr, 0);
	ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
	if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
		IPAERR("fail to get DMA desc memory.\n");
		goto fail_mem_alloc;
	}

	ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;

	result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
			     &ipa_ctx->ep[ipa_ep_idx].connect);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto fail_sps_connect;
	}

	switch (ipa_ep_idx) {
	case 1:
		/* fall through */
	case 2:
		/* fall through */
	case 3:
		sys_idx = ipa_ep_idx;
		INIT_DELAYED_WORK(&replenish_rx_work, replenish_rx_work_func);
		INIT_DELAYED_WORK(&switch_to_intr_work,
				  switch_to_intr_work_func);
		break;
	case WLAN_AMPDU_TX_EP:
		sys_idx = IPA_A5_WLAN_AMPDU_OUT;
		break;
	default:
		IPAERR("Invalid EP index.\n");
		result = -EFAULT;
		goto fail_register_event;
	}

	if (!ipa_ctx->polling_mode) {
		ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
		ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
		ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
		ipa_ctx->sys[sys_idx].event.user =
			&ipa_ctx->sys[sys_idx];
		ipa_ctx->sys[sys_idx].event.callback =
				IPA_CLIENT_IS_CONS(sys_in->client) ?
					ipa_sps_irq_rx_notify :
					ipa_sps_irq_tx_notify;
		result = sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
					    &ipa_ctx->sys[sys_idx].event);
		if (result < 0) {
			IPAERR("register event error %d\n", result);
			goto fail_register_event;
		}
	}

	*clnt_hdl = ipa_ep_idx; /* hand the EP index back as the handle */

	return 0;

fail_register_event:
	sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
fail_sps_connect:
	dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
			  ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
			  ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
fail_mem_alloc:
	sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
fail_sps_api:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail_bad_param:
	return result;
}
EXPORT_SYMBOL(ipa_setup_sys_pipe);
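
/*
 * Illustrative sketch (assumptions marked): bringing up a consumer pipe and
 * tearing it down again. The client type, FIFO size, and notify callback are
 * placeholders, not values mandated by this driver.
 *
 *	struct ipa_sys_connect_params sys_in;
 *	u32 hdl;
 *
 *	memset(&sys_in, 0, sizeof(sys_in));
 *	sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;	// assumed client
 *	sys_in.desc_fifo_sz = 0x800;			// assumed FIFO size
 *	sys_in.notify = my_rx_notify;			// hypothetical callback
 *	sys_in.priv = my_ctx;
 *	if (ipa_setup_sys_pipe(&sys_in, &hdl))
 *		// error: the EP was cleaned up internally
 *	...
 *	ipa_teardown_sys_pipe(hdl);
 */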

/**
 * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
 * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_teardown_sys_pipe(u32 clnt_hdl)
{
	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	sps_disconnect(ipa_ctx->ep[clnt_hdl].ep_hdl);
	dma_free_coherent(NULL, ipa_ctx->ep[clnt_hdl].connect.desc.size,
			  ipa_ctx->ep[clnt_hdl].connect.desc.base,
			  ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
	sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
	return 0;
}
EXPORT_SYMBOL(ipa_teardown_sys_pipe);

/**
 * ipa_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: pointer to the skb to release
 * @user2: endpoint index, cast to a pointer
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa_connect.
 */
static void ipa_tx_comp_usr_notify_release(void *user1, void *user2)
{
	struct sk_buff *skb = (struct sk_buff *)user1;
	u32 ep_idx = (u32)user2;

	IPADBG("skb=%p ep=%d\n", skb, ep_idx);

	IPA_STATS_INC_TX_CNT(ep_idx, ipa_ctx->stats.tx_sw_pkts,
			     ipa_ctx->stats.tx_hw_pkts);

	if (ipa_ctx->ep[ep_idx].client_notify)
		ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
				IPA_WRITE_DONE, (unsigned long)skb);
	else
		dev_kfree_skb(skb);
}

static void ipa_tx_cmd_comp(void *user1, void *user2)
{
	IPA_STATS_INC_CNT(ipa_ctx->stats.imm_cmds[IPA_IP_PACKET_INIT]);
	kfree(user1);
}

/**
 * ipa_tx_dp() - Data-path tx handler
 * @dst: [in] which IPA destination to route tx packets to
 * @skb: [in] the packet to send
 * @meta: [in] TX packet meta-data
 *
 * Data-path tx handler. This is used both for the SW data path, which
 * bypasses most IPA HW blocks, AND for the regular HW data path for WLAN
 * AMPDU traffic only. If dst is a "valid" CONS type, the SW data path is
 * used. If dst is the WLAN_AMPDU PROD type, the HW data path for WLAN AMPDU
 * is used. Anything else is an error. On errors, the client needs to free
 * the skb as needed. On success, the IPA driver will later invoke the client
 * callback if one was supplied; that callback should free the skb. If no
 * callback was supplied, the IPA driver frees the skb internally.
 *
 * The function uses two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor is sent);
 * the first descriptor is used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once the send is done from the SPS point of view, the IPA driver is
 * notified via the supplied completion callback, and
 * ipa_tx_comp_usr_notify_release() invokes the user supplied
 * callback (from ipa_connect)
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
	      struct ipa_tx_meta *meta)
{
	struct ipa_desc desc[2];
	int ipa_ep_idx;
	struct ipa_ip_packet_init *cmd;

	memset(&desc, 0, 2 * sizeof(struct ipa_desc));

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
	if (unlikely(ipa_ep_idx == -1)) {
		IPAERR("dest EP does not exist.\n");
		goto fail_gen;
	}

	if (unlikely(ipa_ctx->ep[ipa_ep_idx].valid == 0)) {
		IPAERR("dest EP not valid.\n");
		goto fail_gen;
	}

	if (IPA_CLIENT_IS_CONS(dst)) {
		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
		if (!cmd) {
			IPAERR("failed to alloc immediate command object\n");
			goto fail_mem_alloc;
		}

		cmd->destination_pipe_index = ipa_ep_idx;
		if (meta && meta->mbim_stream_id_valid)
			cmd->metadata = meta->mbim_stream_id;
		desc[0].opcode = IPA_IP_PACKET_INIT;
		desc[0].pyld = cmd;
		desc[0].len = sizeof(struct ipa_ip_packet_init);
		desc[0].type = IPA_IMM_CMD_DESC;
		desc[0].callback = ipa_tx_cmd_comp;
		desc[0].user1 = cmd;
		desc[1].pyld = skb->data;
		desc[1].len = skb->len;
		desc[1].type = IPA_DATA_DESC_SKB;
		desc[1].callback = ipa_tx_comp_usr_notify_release;
		desc[1].user1 = skb;
		desc[1].user2 = (void *)ipa_ep_idx;

		if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc,
			     true)) {
			IPAERR("fail to send immediate command\n");
			goto fail_send;
		}
	} else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
		desc[0].pyld = skb->data;
		desc[0].len = skb->len;
		desc[0].type = IPA_DATA_DESC_SKB;
		desc[0].callback = ipa_tx_comp_usr_notify_release;
		desc[0].user1 = skb;
		desc[0].user2 = (void *)ipa_ep_idx;

		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
				 &desc[0], true)) {
			IPAERR("fail to send skb\n");
			goto fail_gen;
		}
	} else {
		IPAERR("%d PROD is not supported.\n", dst);
		goto fail_gen;
	}

	return 0;

fail_send:
	kfree(cmd);
fail_mem_alloc:
fail_gen:
	return -EFAULT;
}
EXPORT_SYMBOL(ipa_tx_dp);
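
/*
 * Illustrative sketch: pushing an skb through the SW data path. The
 * destination client is an assumption; on failure the caller keeps
 * ownership of the skb.
 *
 *	if (ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL))
 *		dev_kfree_skb_any(skb);
 */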

static void ipa_handle_rx(void)
{
	int inactive_cycles = 0;
	int cnt;

	do {
		cnt = ipa_handle_rx_core(true, true);
		if (cnt == 0) {
			inactive_cycles++;
			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
		} else {
			inactive_cycles = 0;
		}
	} while (inactive_cycles <= POLLING_INACTIVITY);

	ipa_rx_switch_to_intr_mode();
}

/**
 * ipa_wq_handle_rx() - handle packet reception. This function is executed in
 * the context of a work queue.
 * @work: work struct needed by the work queue
 *
 * ipa_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa_wq_handle_rx(struct work_struct *work)
{
	ipa_handle_rx();
}

/**
 * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
 *
 * The function allocates buffers into the rx_pkt_wrapper_cache until there
 * are IPA_RX_POOL_CEIL buffers in the cache.
 * - Allocate a packet wrapper from the cache
 * - Initialize the packet's link
 * - Allocate the packet's socket buffer (skb)
 * - Fill the packet's skb with data
 * - Make the packet DMAable
 * - Add the packet to the system pipe linked list
 * - Initiate a SPS transfer so that the SPS driver will use this packet later.
 */
void ipa_replenish_rx_cache(void)
{
	void *ptr;
	struct ipa_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = sys->len;

	while (rx_len_cached < IPA_RX_POOL_CEIL) {
		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
					   flag);
		if (!rx_pkt) {
			IPAERR("failed to alloc rx wrapper\n");
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);

		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, flag);
		if (rx_pkt->skb == NULL) {
			IPAERR("failed to alloc skb\n");
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
		rx_pkt->dma_address = dma_map_single(NULL, ptr,
						     IPA_RX_SKB_SIZE,
						     DMA_FROM_DEVICE);
		if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
			IPAERR("dma_map_single failure %p for %p\n",
			       (void *)rx_pkt->dma_address, ptr);
			goto fail_dma_mapping;
		}

		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
		rx_len_cached = ++sys->len;

		ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
				       IPA_RX_SKB_SIZE, rx_pkt,
				       SPS_IOVEC_FLAG_INT);

		if (ret) {
			IPAERR("sps_transfer_one failed %d\n", ret);
			goto fail_sps_transfer;
		}
	}

	ipa_ctx->stats.rx_q_len = sys->len;

	return;

fail_sps_transfer:
	list_del(&rx_pkt->link);
	rx_len_cached = --sys->len;
	dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
			 DMA_FROM_DEVICE);
fail_dma_mapping:
	dev_kfree_skb(rx_pkt->skb);
fail_skb_alloc:
	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (rx_len_cached == 0) {
		IPA_STATS_INC_CNT(ipa_ctx->stats.rx_repl_repost);
		schedule_delayed_work(&replenish_rx_work,
				      msecs_to_jiffies(100));
	}
	ipa_ctx->stats.rx_q_len = sys->len;
	return;
}

static void replenish_rx_work_func(struct work_struct *work)
{
	ipa_replenish_rx_cache();
}

static void switch_to_intr_work_func(struct work_struct *work)
{
	ipa_handle_rx();
}

/**
 * ipa_cleanup_rx() - release RX queue resources
 *
 */
void ipa_cleanup_rx(void)
{
	struct ipa_rx_pkt_wrapper *rx_pkt;
	struct ipa_rx_pkt_wrapper *r;
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];

	list_for_each_entry_safe(rx_pkt, r,
				 &sys->head_desc_list, link) {
		list_del(&rx_pkt->link);
		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(rx_pkt->skb);
		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
	}
}