/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
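/* Usage sketch (see octeon_send_command() later in this file):
 *	INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
 */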

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

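	/* num_descs must be a power of 2; x & (x - 1) is non-zero for any
	 * other value (e.g. 512 passes, 500 is rejected).
	 */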
	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

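	/* Try a DMA allocation on the queue's NUMA node first and fall back
	 * to any node if that fails; set_dev_node() temporarily retargets
	 * the device so lio_dma_alloc() returns node-local memory.
	 */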
	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the locks for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);
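	/* Lock roles, as used later in this file: post_lock serializes
	 * command posting (octeon_send_command()), lock protects the flush
	 * path (octeon_flush_iq()), and iq_flush_running_lock is trylock'd
	 * so that only one flush runs at a time.
	 */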

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

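/* Poll each active IQ until the device has fetched all pending
 * instructions; returns the number of instructions still pending
 * (0 on success), giving up after ~1000 retries.
 */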
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->
					instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon can fetch any
	 * instructions.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

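	/* Only one free slot will remain after this post: the command is
	 * still accepted, but IQ_SEND_STOP tells the caller to stop
	 * submitting (the tx path typically stops the netif queue here).
	 */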
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

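/* Walk the request list from flush_index up to the device's read index,
 * releasing buffers via the registered reqtype free functions and
 * dispatching completed soft commands; returns the number of entries
 * processed.
 */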
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					[OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

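/* Drain completed instructions from the IQ, bounded by napi_budget when
 * it is non-zero.  Returns 1 (tx done) when the queue is drained or the
 * flush is skipped because another flush is already running, 0 when the
 * napi budget was exhausted first.
 */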
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* Return immediately if no work is pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 1, 0);

	lio_enable_irq(NULL, iq);
}

/* Called from the delayed workqueue at regular intervals to check the
 * instruction queue for commands to be posted and for commands that were
 * fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

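/* Post a single command to IQ iq_no and remember buf so it can be
 * released once the device fetches the command.  Returns IQ_SEND_OK,
 * IQ_SEND_STOP (posted, but the queue is nearly full) or IQ_SEND_FAILED
 * (queue full, command dropped).
 */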
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler
	 * from running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

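/* Fill in the instruction header (IH2 for the CN6XXX family, IH3 plus
 * PKI IH3 for the CN23XX PF), the IRH opcode/subcode and the
 * opcode/subcode-specific parameters of a soft command before it is sent.
 */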
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* PKI IH3 */
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

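/* Carve a soft command out of a pool buffer.  Layout inside the single
 * DMA-coherent allocation: the octeon_soft_command struct itself, the
 * optional context area, then the data and response areas, each rounded
 * up to the next 128-byte boundary.
 */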
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
		SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;
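	/* (x + 127) & 0xffffff80 rounds x up to a multiple of 128 by
	 * clearing the low seven bits, e.g. (200 + 127) & 0xffffff80 = 256.
	 */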

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

void
octeon_free_soft_command(struct octeon_device *oct,
			 struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}