/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

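/* Return 1 if instruction queue iq_no is configured for 64-byte commands,
 * else 0.
 */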
static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
	else if (OCTEON_CN23XX_VF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

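/* Tear down an instruction queue: stop the doorbell-check work, free the
 * request list and release the descriptor ring DMA memory. Returns 0 on
 * success, 1 if the descriptor ring was never allocated.
 */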
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct))
		return 1;

	return 0;
}

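/* Poll (up to ~1000 iterations) until Octeon has fetched every posted
 * instruction on all active queues. Returns the number of instructions
 * still pending when we give up, or 0 when everything was fetched.
 */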
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

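/* Write the accumulated fill count to the queue's doorbell register so
 * Octeon starts fetching the newly posted commands. Only done while the
 * device is in the RUNNING state.
 */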
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

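/* Copy a 32- or 64-byte command into the descriptor ring slot at the
 * current host write index.
 */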
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

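/* Reserve the next ring slot and copy the command into it. The doorbell
 * is not rung here; that is the caller's decision. Returns IQ_SEND_FAILED
 * when the ring is full, IQ_SEND_STOP when the queue becomes full with
 * this command, IQ_SEND_OK otherwise.
 */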
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

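/* Register the callback used to free the driver buffer attached to a
 * request of the given type once the request completes. Returns -EINVAL
 * if reqtype is out of range.
 */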
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

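/* Record the buffer and request type posted at ring index idx so the
 * completion path knows how to free it later.
 */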
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

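/* Walk the request list from flush_index up to the hardware read index,
 * freeing completed network buffers and dispatching finished soft
 * commands. Returns the number of entries processed (bounded by
 * napi_budget when it is nonzero).
 */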
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We expect a response from Octeon, so add
				 * sc to the ordered soft-command response
				 * list; lio_process_ordered_list() will
				 * process it.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

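/* Flush completed entries from the instruction queue, up to napi_budget
 * of them when napi_budget is nonzero. Returns 1 if the queue was fully
 * flushed (tx done), 0 if the budget ran out first.
 */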
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (napi_budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    napi_budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_sub(inst_processed, &iq->instr_pending);
			iq->stats.instr_processed += inst_processed;
		}

		tot_inst_processed += inst_processed;
		inst_processed = 0;

	} while (tot_inst_processed < napi_budget);

	if (napi_budget && (tot_inst_processed >= napi_budget))
		tx_done = 0;

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* return immediately, if no work pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 0);

	lio_enable_irq(NULL, iq);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

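/* Post one command to instruction queue iq_no, remember the buffer so it
 * can be freed on completion, and ring the doorbell if force_db is set.
 * Returns the IQ_SEND_* status of the post.
 */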
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and the tx interrupt handler
	 * from running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

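/* Fill in the instruction header, PKI header (CN23xx only) and response
 * descriptor fields of a soft command before it is posted. opcode/subcode
 * identify the request; irh_ossp, ossp0 and ossp1 carry request-specific
 * parameters.
 */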
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* PKI IH3 */
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* PKI IH3 */
			/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

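/* Hook up the DMA'able data and response buffers to the command words,
 * then post the soft command to its instruction queue with the doorbell
 * rung immediately.
 */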
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

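/* Pre-allocate the pool of DMA-coherent soft-command buffers. On any
 * allocation failure the partially built pool is freed and 1 is returned.
 */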
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

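/* Release every buffer in the soft-command pool back to the DMA
 * allocator.
 */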
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

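/* Take one buffer from the pool and carve it into the soft-command
 * struct, an optional context area, and 128-byte-aligned data and
 * response areas. Returns NULL if the pool is empty.
 */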
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
		SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

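/* Return a soft-command buffer to the pool for reuse. */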
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}