/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

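/* Bump a per-IQ statistics counter (e.g. instr_posted, bytes_sent). */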
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
        (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

struct iq_post_status {
        int status;
        int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

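/* Per-device table of "free" callbacks, one slot per request type.
 * Populated via octeon_register_reqtype_free_fn() and invoked when a
 * posted request is retired from an instruction queue.
 */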
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
        struct octeon_instr_queue *iq =
            (struct octeon_instr_queue *)oct->instr_queue[iq_no];
        return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with the old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

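/* Set up one instruction queue: allocate the DMA command ring and the
 * shadow request list (NUMA-local when possible), initialize the queue
 * indices and locks, program the chip registers for this IQ, and start
 * the periodic doorbell-check worker.
 */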
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
                            union oct_txpciq txpciq,
                            u32 num_descs)
{
        struct octeon_instr_queue *iq;
        struct octeon_iq_config *conf = NULL;
        u32 iq_no = (u32)txpciq.s.q_no;
        u32 q_size;
        struct cavium_wq *db_wq;
        int orig_node = dev_to_node(&oct->pci_dev->dev);
        int numa_node = cpu_to_node(iq_no % num_online_cpus());

        if (OCTEON_CN6XXX(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
        else if (OCTEON_CN23XX_PF(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
        if (!conf) {
                dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
                        oct->chip_id);
                return 1;
        }

        if (num_descs & (num_descs - 1)) {
                dev_err(&oct->pci_dev->dev,
                        "Number of descriptors for instr queue %d is not a power of 2.\n",
                        iq_no);
                return 1;
        }

        q_size = (u32)conf->instr_type * num_descs;

        iq = oct->instr_queue[iq_no];

        iq->oct_dev = oct;

        set_dev_node(&oct->pci_dev->dev, numa_node);
        iq->base_addr = lio_dma_alloc(oct, q_size,
                                      (dma_addr_t *)&iq->base_addr_dma);
        set_dev_node(&oct->pci_dev->dev, orig_node);
        if (!iq->base_addr)
                iq->base_addr = lio_dma_alloc(oct, q_size,
                                              (dma_addr_t *)&iq->base_addr_dma);
        if (!iq->base_addr) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
                        iq_no);
                return 1;
        }

        iq->max_count = num_descs;

        /* Initialize a list to hold requests that have been posted to
         * Octeon but have yet to be fetched by Octeon.
         */
        iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
                                        numa_node);
        if (!iq->request_list)
                iq->request_list = vmalloc(sizeof(*iq->request_list) *
                                           num_descs);
        if (!iq->request_list) {
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
                        iq_no);
                return 1;
        }

        memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

        dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
                iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

        iq->txpciq.u64 = txpciq.u64;
        iq->fill_threshold = (u32)conf->db_min;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->octeon_read_index = 0;
        iq->flush_index = 0;
        iq->last_db_time = 0;
        iq->do_auto_flush = 1;
        iq->db_timeout = (u32)conf->db_timeout;
        atomic_set(&iq->instr_pending, 0);

        /* Initialize the spinlock for this instruction queue */
        spin_lock_init(&iq->lock);
        spin_lock_init(&iq->post_lock);

        spin_lock_init(&iq->iq_flush_running_lock);

        oct->io_qmask.iq |= (1ULL << iq_no);

        /* Set the 32B/64B mode for each input queue */
        oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
        iq->iqcmd_64B = (conf->instr_type == 64);

        oct->fn_list.setup_iq_regs(oct, iq_no);

        oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
                                                     WQ_MEM_RECLAIM,
                                                     0);
        if (!oct->check_db_wq[iq_no].wq) {
                vfree(iq->request_list);
                iq->request_list = NULL;
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
                        iq_no);
                return 1;
        }

        db_wq = &oct->check_db_wq[iq_no];

        INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
        db_wq->wk.ctxptr = oct;
        db_wq->wk.ctxul = iq_no;
        queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

        return 0;
}

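/* Undo octeon_init_instr_queue(): stop the doorbell-check worker, free
 * the request list and, if present, the DMA command ring.  Returns 0 on
 * success, 1 if there was no ring to free.
 */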
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
        u64 desc_size = 0, q_size;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
        destroy_workqueue(oct->check_db_wq[iq_no].wq);

        if (OCTEON_CN6XXX(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
        else if (OCTEON_CN23XX_PF(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));

        vfree(iq->request_list);

        if (iq->base_addr) {
                q_size = iq->max_count * desc_size;
                lio_dma_free(oct, (u32)q_size, iq->base_addr,
                             iq->base_addr_dma);
                return 0;
        }
        return 1;
}

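/* Allocate and initialize the octeon_instr_queue struct for iq_no, or
 * just refresh txpciq/app_ctx if the queue already exists.  On success
 * this also bumps num_iqs and enables the device I/O queues.
 */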
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
                    int ifidx,
                    int q_index,
                    union oct_txpciq txpciq,
                    u32 num_descs,
                    void *app_ctx)
{
        u32 iq_no = (u32)txpciq.s.q_no;
        int numa_node = cpu_to_node(iq_no % num_online_cpus());

        if (oct->instr_queue[iq_no]) {
                dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
                        iq_no);
                oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
                oct->instr_queue[iq_no]->app_ctx = app_ctx;
                return 0;
        }
        oct->instr_queue[iq_no] =
            vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
        if (!oct->instr_queue[iq_no])
                oct->instr_queue[iq_no] =
                    vmalloc(sizeof(struct octeon_instr_queue));
        if (!oct->instr_queue[iq_no])
                return 1;

        memset(oct->instr_queue[iq_no], 0,
               sizeof(struct octeon_instr_queue));

        oct->instr_queue[iq_no]->q_index = q_index;
        oct->instr_queue[iq_no]->app_ctx = app_ctx;
        oct->instr_queue[iq_no]->ifidx = ifidx;

        if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
                vfree(oct->instr_queue[iq_no]);
                oct->instr_queue[iq_no] = NULL;
                return 1;
        }

        oct->num_iqs++;
        oct->fn_list.enable_io_queues(oct);
        return 0;
}

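/* Poll (bounded by up to 1000 schedule timeouts) until Octeon has fetched
 * all instructions pending on every active IQ.  Returns the number of
 * instructions still unfetched when the retries run out, 0 if all were
 * fetched.
 */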
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
        int i, retry = 1000, pending, instr_cnt = 0;

        do {
                instr_cnt = 0;

                /*for (i = 0; i < oct->num_iqs; i++) {*/
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & (1ULL << i)))
                                continue;
                        pending =
                            atomic_read(&oct->instr_queue[i]->instr_pending);
                        if (pending)
                                __check_db_timeout(oct, i);
                        instr_cnt += pending;
                }

                if (instr_cnt == 0)
                        break;

                schedule_timeout_uninterruptible(1);

        } while (retry-- && instr_cnt);

        return instr_cnt;
}

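/* Notify the device of fill_cnt newly queued commands.  The doorbell is
 * written only while the device is in the RUNNING state; the fill count
 * is then reset and the write time recorded for the db-timeout check.
 */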
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
        if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
                writel(iq->fill_cnt, iq->doorbell_reg);
                /* make sure doorbell write goes through */
                mmiowb();
                iq->fill_cnt = 0;
                iq->last_db_time = jiffies;
                return;
        }
}

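/* Copy one 32B or 64B command image into the ring slot at
 * host_write_index.
 */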
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
                                      u8 *cmd)
{
        u8 *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

        memcpy(iqptr, cmd, cmdsize);
}

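/* Place a command in the ring and advance host_write_index.  Returns
 * IQ_SEND_STOP when the post succeeded but the queue is nearly full,
 * and IQ_SEND_FAILED (index -1) when the ring is full.  Called with
 * post_lock held (see octeon_send_command()); ringing the doorbell is
 * left to the caller.
 */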
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
        struct iq_post_status st;

        st.status = IQ_SEND_OK;

        /* This ensures that the read index does not wrap around to the same
         * position if queue gets full before Octeon could fetch any instr.
         */
        if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
                st.status = IQ_SEND_FAILED;
                st.index = -1;
                return st;
        }

        if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
                st.status = IQ_SEND_STOP;

        __copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        wmb();

        atomic_inc(&iq->instr_pending);

        return st;
}

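/* Register the callback used to free buffers of the given request type
 * once Octeon has fetched them.  Returns 0, or -EINVAL if reqtype is out
 * of range.
 */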
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
                                void (*fn)(void *))
{
        if (reqtype > REQTYPE_LAST) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
                        __func__, reqtype);
                return -EINVAL;
        }

        reqtype_free_fn[oct->octeon_id][reqtype] = fn;

        return 0;
}

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
                      int idx, void *buf, int reqtype)
{
        iq->request_list[idx].buf = buf;
        iq->request_list[idx].reqtype = reqtype;
}

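/* Retire entries from flush_index up to the device read index: free
 * network buffers through reqtype_free_fn and complete or queue soft
 * commands.  Returns the number of entries retired, capped by
 * napi_budget when it is nonzero.
 */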
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
                            struct octeon_instr_queue *iq, u32 napi_budget)
{
        int reqtype;
        void *buf;
        u32 old = iq->flush_index;
        u32 inst_count = 0;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct octeon_soft_command *sc;
        struct octeon_instr_irh *irh;
        unsigned long flags;

        while (old != iq->octeon_read_index) {
                reqtype = iq->request_list[old].reqtype;
                buf = iq->request_list[old].buf;

                if (reqtype == REQTYPE_NONE)
                        goto skip_this;

                octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
                                                     &bytes_compl);

                switch (reqtype) {
                case REQTYPE_NORESP_NET:
                case REQTYPE_NORESP_NET_SG:
                case REQTYPE_RESP_NET_SG:
                        reqtype_free_fn[oct->octeon_id][reqtype](buf);
                        break;
                case REQTYPE_RESP_NET:
                case REQTYPE_SOFT_COMMAND:
                        sc = buf;

                        if (OCTEON_CN23XX_PF(oct))
                                irh = (struct octeon_instr_irh *)
                                        &sc->cmd.cmd3.irh;
                        else
                                irh = (struct octeon_instr_irh *)
                                        &sc->cmd.cmd2.irh;
                        if (irh->rflag) {
                                /* We're expecting a response from Octeon.
                                 * It's up to lio_process_ordered_list() to
                                 * process sc. Add sc to the ordered soft
                                 * command response list because we expect
                                 * a response from Octeon.
                                 */
                                spin_lock_irqsave
                                        (&oct->response_list
                                         [OCTEON_ORDERED_SC_LIST].lock,
                                         flags);
                                atomic_inc(&oct->response_list
                                           [OCTEON_ORDERED_SC_LIST].
                                           pending_req_count);
                                list_add_tail(&sc->node, &oct->response_list
                                        [OCTEON_ORDERED_SC_LIST].head);
                                spin_unlock_irqrestore
                                        (&oct->response_list
                                         [OCTEON_ORDERED_SC_LIST].lock,
                                         flags);
                        } else {
                                if (sc->callback) {
                                        /* This callback must not sleep */
                                        sc->callback(oct, OCTEON_REQUEST_DONE,
                                                     sc->callback_arg);
                                }
                        }
                        break;
                default:
                        dev_err(&oct->pci_dev->dev,
                                "%s Unknown reqtype: %d buf: %p at idx %d\n",
                                __func__, reqtype, buf, old);
                }

                iq->request_list[old].buf = NULL;
                iq->request_list[old].reqtype = 0;

 skip_this:
                inst_count++;
                INCR_INDEX_BY1(old, iq->max_count);

                if ((napi_budget) && (inst_count >= napi_budget))
                        break;
        }
        if (bytes_compl)
                octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
                                                   bytes_compl);
        iq->flush_index = old;

        return inst_count;
}

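/* Drain fetched entries from an IQ: refresh octeon_read_index from the
 * chip, then retire requests until the queue is caught up or the napi
 * budget is spent.  Returns 1 if the queue was fully drained (or the
 * flush lock was contended), 0 if the budget ran out first.
 */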
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
                u32 pending_thresh, u32 napi_budget)
{
        u32 inst_processed = 0;
        u32 tot_inst_processed = 0;
        int tx_done = 1;

        if (!spin_trylock(&iq->iq_flush_running_lock))
                return tx_done;

        spin_lock_bh(&iq->lock);

        iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

        if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
                do {
                        /* Process any outstanding IQ packets. */
                        if (iq->flush_index == iq->octeon_read_index)
                                break;

                        if (napi_budget)
                                inst_processed = lio_process_iq_request_list
                                        (oct, iq,
                                         napi_budget - tot_inst_processed);
                        else
                                inst_processed =
                                        lio_process_iq_request_list(oct, iq, 0);

                        if (inst_processed) {
                                atomic_sub(inst_processed, &iq->instr_pending);
                                iq->stats.instr_processed += inst_processed;
                        }

                        tot_inst_processed += inst_processed;
                        inst_processed = 0;

                } while (tot_inst_processed < napi_budget);

                if (napi_budget && (tot_inst_processed >= napi_budget))
                        tx_done = 0;
        }

        iq->last_db_time = jiffies;

        spin_unlock_bh(&iq->lock);

        spin_unlock(&iq->iq_flush_running_lock);

        return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
        struct octeon_instr_queue *iq;
        u64 next_time;

        if (!oct)
                return;

        iq = oct->instr_queue[iq_no];
        if (!iq)
                return;

        /* Return immediately if no work is pending */
        if (!atomic_read(&iq->instr_pending))
                return;
        /* If jiffies - last_db_time < db_timeout do nothing */
        next_time = iq->last_db_time + iq->db_timeout;
        if (!time_after(jiffies, (unsigned long)next_time))
                return;
        iq->last_db_time = jiffies;

        /* Flush the instruction queue */
        octeon_flush_iq(oct, iq, 1, 0);

        lio_enable_irq(NULL, iq);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
        u64 iq_no = wk->ctxul;
        struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
        u32 delay = 10;

        __check_db_timeout(oct, iq_no);
        queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

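/* Post one command plus its completion buffer to IQ iq_no under
 * post_lock and update the queue's posted/dropped statistics.  The
 * doorbell is rung immediately only when force_db is set; otherwise
 * ringing is deferred to the caller.
 */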
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
                    u32 force_db, void *cmd, void *buf,
                    u32 datasize, u32 reqtype)
{
        struct iq_post_status st;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        /* Get the lock and prevent other tasks and tx interrupt handler from
         * running.
         */
        spin_lock_bh(&iq->post_lock);

        st = __post_command2(iq, cmd);

        if (st.status != IQ_SEND_FAILED) {
                octeon_report_sent_bytes_to_bql(buf, reqtype);
                __add_to_request_list(iq, st.index, buf, reqtype);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

                if (force_db)
                        ring_doorbell(oct, iq);
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
        }

        spin_unlock_bh(&iq->post_lock);

        /* This is only done here to expedite packets being flushed
         * for cases where there are no IQ completion interrupts.
         */
        /*if (iq->do_auto_flush)*/
        /*      octeon_flush_iq(oct, iq, 2, 0);*/

        return st.status;
}

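/* Fill in the chip-specific command header for a soft command: IH3 plus
 * PKI_IH3 on CN23XX PF, IH2 on CN6XXX, followed by the IRH and the
 * opcode/subcode-specific words.  rflag and the fixed-size field (fsz)
 * are chosen according to whether a response buffer (rdatasize) is set.
 */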
void
octeon_prepare_soft_command(struct octeon_device *oct,
                            struct octeon_soft_command *sc,
                            u8 opcode,
                            u8 subcode,
                            u32 irh_ossp,
                            u64 ossp0,
                            u64 ossp1)
{
        struct octeon_config *oct_cfg;
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;

        WARN_ON(opcode > 15);
        WARN_ON(subcode > 127);

        oct_cfg = octeon_get_conf(oct);

        if (OCTEON_CN23XX_PF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

                ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

                pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

                pki_ih3->w = 1;
                pki_ih3->raw = 1;
                pki_ih3->utag = 1;
                pki_ih3->uqpg =
                        oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
                pki_ih3->utt = 1;
                pki_ih3->tag = LIO_CONTROL;
                pki_ih3->tagtype = ATOMIC_TAG;
                pki_ih3->qpg =
                        oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
                pki_ih3->pm = 0x7;
                pki_ih3->sl = 8;

                if (sc->datasize)
                        ih3->dlengsz = sc->datasize;

                irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                irh->opcode = opcode;
                irh->subcode = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp = irh_ossp;
                sc->cmd.cmd3.ossp[0] = ossp0;
                sc->cmd.cmd3.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen = sc->rdatasize;

                        irh->rflag = 1;
                        /*PKI IH3*/
                        /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
                        ih3->fsz = LIO_SOFTCMDRESP_IH3;
                } else {
                        irh->rflag = 0;
                        /*PKI IH3*/
                        /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
                        ih3->fsz = LIO_PCICMD_O3;
                }

        } else {
                ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
                ih2->tagtype = ATOMIC_TAG;
                ih2->tag = LIO_CONTROL;
                ih2->raw = 1;
                ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

                if (sc->datasize) {
                        ih2->dlengsz = sc->datasize;
                        ih2->rs = 1;
                }

                irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
                irh->opcode = opcode;
                irh->subcode = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp = irh_ossp;
                sc->cmd.cmd2.ossp[0] = ossp0;
                sc->cmd.cmd2.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen = sc->rdatasize;

                        irh->rflag = 1;
                        /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
                        ih2->fsz = LIO_SOFTCMDRESP_IH2;
                } else {
                        irh->rflag = 0;
                        /* irh + ossp[0] + ossp[1] = 24 bytes */
                        ih2->fsz = LIO_PCICMD_O2;
                }
        }
}

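/* Attach the command's DMA data/response pointers, initialize the
 * completion word and timeout, then post it with a forced doorbell via
 * octeon_send_command().
 *
 * Typical caller sequence (a sketch; my_completion_fn and my_ctx are
 * hypothetical, not from this file):
 *
 *      sc = octeon_alloc_soft_command(oct, datasize, rdatasize, ctxsize);
 *      octeon_prepare_soft_command(oct, sc, opcode, subcode, 0, 0, 0);
 *      sc->callback = my_completion_fn;
 *      sc->callback_arg = my_ctx;
 *      retval = octeon_send_soft_command(oct, sc);
 */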
int octeon_send_soft_command(struct octeon_device *oct,
                             struct octeon_soft_command *sc)
{
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        u32 len;

        if (OCTEON_CN23XX_PF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
                if (ih3->dlengsz) {
                        WARN_ON(!sc->dmadptr);
                        sc->cmd.cmd3.dptr = sc->dmadptr;
                }
                irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                if (irh->rflag) {
                        WARN_ON(!sc->dmarptr);
                        WARN_ON(!sc->status_word);
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd3.rptr = sc->dmarptr;
                }
                len = (u32)ih3->dlengsz;
        } else {
                ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
                if (ih2->dlengsz) {
                        WARN_ON(!sc->dmadptr);
                        sc->cmd.cmd2.dptr = sc->dmadptr;
                }
                irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
                if (irh->rflag) {
                        WARN_ON(!sc->dmarptr);
                        WARN_ON(!sc->status_word);
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd2.rptr = sc->dmarptr;
                }
                len = (u32)ih2->dlengsz;
        }

        if (sc->wait_time)
                sc->timeout = jiffies + sc->wait_time;

        return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
                                    len, REQTYPE_SOFT_COMMAND));
}

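/* Preallocate MAX_SOFT_COMMAND_BUFFERS DMA-coherent soft command buffers
 * and string them on sc_buf_pool.  Returns 0 on success; on allocation
 * failure the partially built pool is freed and 1 is returned.
 */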
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
        int i;
        u64 dma_addr;
        struct octeon_soft_command *sc;

        INIT_LIST_HEAD(&oct->sc_buf_pool.head);
        spin_lock_init(&oct->sc_buf_pool.lock);
        atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

        for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
                sc = (struct octeon_soft_command *)
                        lio_dma_alloc(oct,
                                      SOFT_COMMAND_BUFFER_SIZE,
                                      (dma_addr_t *)&dma_addr);
                if (!sc) {
                        octeon_free_sc_buffer_pool(oct);
                        return 1;
                }

                sc->dma_addr = dma_addr;
                sc->size = SOFT_COMMAND_BUFFER_SIZE;

                list_add_tail(&sc->node, &oct->sc_buf_pool.head);
        }

        return 0;
}

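/* Free every buffer currently on the soft command pool free list.
 * Always returns 0.
 */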
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
        struct list_head *tmp, *tmp2;
        struct octeon_soft_command *sc;

        spin_lock_bh(&oct->sc_buf_pool.lock);

        list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
                list_del(tmp);

                sc = (struct octeon_soft_command *)tmp;

                lio_dma_free(oct, sc->size, sc, sc->dma_addr);
        }

        INIT_LIST_HEAD(&oct->sc_buf_pool.head);

        spin_unlock_bh(&oct->sc_buf_pool.lock);

        return 0;
}

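/* Carve a soft command out of a pool buffer: the octeon_soft_command
 * struct itself, then optional context, data and response regions, with
 * the data and response areas aligned to 128 bytes.  Returns NULL when
 * the pool is empty.
 */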
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
                                                      u32 datasize,
                                                      u32 rdatasize,
                                                      u32 ctxsize)
{
        u64 dma_addr;
        u32 size;
        u32 offset = sizeof(struct octeon_soft_command);
        struct octeon_soft_command *sc = NULL;
        struct list_head *tmp;

        WARN_ON((offset + datasize + rdatasize + ctxsize) >
                SOFT_COMMAND_BUFFER_SIZE);

        spin_lock_bh(&oct->sc_buf_pool.lock);

        if (list_empty(&oct->sc_buf_pool.head)) {
                spin_unlock_bh(&oct->sc_buf_pool.lock);
                return NULL;
        }

        list_for_each(tmp, &oct->sc_buf_pool.head)
                break;

        list_del(tmp);

        atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

        spin_unlock_bh(&oct->sc_buf_pool.lock);

        sc = (struct octeon_soft_command *)tmp;

        dma_addr = sc->dma_addr;
        size = sc->size;

        memset(sc, 0, sc->size);

        sc->dma_addr = dma_addr;
        sc->size = size;

        if (ctxsize) {
                sc->ctxptr = (u8 *)sc + offset;
                sc->ctxsize = ctxsize;
        }

        /* Start data at 128 byte boundary */
        offset = (offset + ctxsize + 127) & 0xffffff80;

        if (datasize) {
                sc->virtdptr = (u8 *)sc + offset;
                sc->dmadptr = dma_addr + offset;
                sc->datasize = datasize;
        }

        /* Start rdata at 128 byte boundary */
        offset = (offset + datasize + 127) & 0xffffff80;

        if (rdatasize) {
                WARN_ON(rdatasize < 16);
                sc->virtrptr = (u8 *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
                sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
        }

        return sc;
}

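/* Return a soft command buffer to the pool. */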
void octeon_free_soft_command(struct octeon_device *oct,
                              struct octeon_soft_command *sc)
{
        spin_lock_bh(&oct->sc_buf_pool.lock);

        list_add_tail(&sc->node, &oct->sc_buf_pool.head);

        atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

        spin_unlock_bh(&oct->sc_buf_pool.lock);
}