/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

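/* Result of posting one command to an instruction queue: an IQ_SEND_*
 * status and the ring index at which the command was written (-1 on
 * failure).
 */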
struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible to old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

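/* Allocate and initialize the descriptor ring and bookkeeping for one
 * instruction (tx) queue, preferring memory on the NUMA node that services
 * the queue, and start its doorbell-check delayed work.
 */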
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d is not a power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlocks for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

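/* Tear down one instruction queue: cancel the doorbell-check work, free
 * the request list and release the descriptor ring DMA memory.  Returns 0
 * on success, 1 if the ring was never allocated.
 */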
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ %d is in use. Cannot create it again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

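/* Illustrative sketch of calling octeon_setup_iq() (not driver code): a
 * typical caller fills in the txpciq union and passes the per-queue
 * descriptor count from the device configuration, e.g.:
 *
 *	union oct_txpciq txpciq;
 *
 *	txpciq.u64 = 0;
 *	txpciq.s.q_no = q_no;
 *	if (octeon_setup_iq(oct, ifidx, q_no, txpciq, num_descs, app_ctx))
 *		return -ENOMEM;
 *
 * q_no, ifidx, num_descs and app_ctx are placeholders for values the
 * caller already has.
 */
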
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

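/* Write the accumulated fill count to the queue's doorbell register so
 * the Octeon hardware starts fetching the newly posted commands.
 */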
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

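/* Reserve the next ring slot and copy one command into it.  In this file
 * the only caller, octeon_send_command(), holds the queue's post lock
 * around this call.
 */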
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

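/* Illustrative sketch (not driver code): a network module would register
 * its buffer-release handlers once per device before posting commands,
 * e.g.:
 *
 *	octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET,
 *					free_netbuf);
 *	octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET_SG,
 *					free_netsgbuf);
 *
 * free_netbuf and free_netsgbuf stand in for whatever void (*)(void *)
 * handlers the caller provides.
 */
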
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

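/* Walk the request list from flush_index up to the hardware read index and
 * retire every fetched command: free non-response buffers through the
 * registered reqtype handlers, queue soft commands that expect a response
 * on the ordered response list, and run the completion callback for those
 * that do not.
 */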
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

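/* Drain an instruction queue: refresh the hardware read index, retire
 * completed commands via lio_process_iq_request_list() and credit them
 * against instr_pending.  Returns 1 if the queue was drained within the
 * NAPI budget, 0 if the budget was exhausted first.
 */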
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* return immediately, if no work pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 1, 0);

	lio_enable_irq(NULL, iq);
}

/* Delayed work that runs at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by
 * Octeon; it re-queues itself every 10 ms.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

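/* Post one command to an instruction queue under post_lock and account
 * for it in the queue statistics.  Returns IQ_SEND_OK, IQ_SEND_STOP (the
 * queue is almost full) or IQ_SEND_FAILED (the queue is full and the
 * command was dropped).
 */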
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

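/* Fill in the instruction header, IRH and response-pointer fields of a
 * soft command.  CN23XX PF devices use the 64-byte cmd3 format with a PKI
 * instruction header; older CN6XXX devices use the cmd2 format.
 */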
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* PKI IH3 */
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

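/* Illustrative sketch of the soft-command round trip (not driver code;
 * my_done_cb and my_ctx are placeholders for caller-provided completion
 * handling):
 *
 *	sc = octeon_alloc_soft_command(oct, datasize, rdatasize, 0);
 *	if (!sc)
 *		return -ENOMEM;
 *	octeon_prepare_soft_command(oct, sc, opcode, subcode, irh_ossp,
 *				    ossp0, ossp1);
 *	sc->callback = my_done_cb;
 *	sc->callback_arg = my_ctx;
 *	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
 *		octeon_free_soft_command(oct, sc);
 */
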
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

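/* Take a buffer from the pre-allocated pool and carve it into a soft
 * command: the context area follows the struct itself, and the data and
 * response areas are each aligned to the next 128-byte boundary.
 */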
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
		SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}