/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to
	 * Octeon but have not yet been fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

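/* Tear down an instruction queue created by octeon_init_instr_queue():
 * stop the doorbell-check work, free the request list and the descriptor
 * ring. Returns 0 on success, 1 if the ring was never allocated.
 */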
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

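/* Wait, with bounded retries, for Octeon to fetch the instructions still
 * pending on the active instruction queues. Returns the number of
 * instructions left unfetched when the retries run out, so 0 means every
 * queue drained.
 */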
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

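/* Write the accumulated fill count to the queue's doorbell register so
 * that Octeon starts fetching the newly posted commands; a no-op unless
 * the device is in the RUNNING state.
 */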
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

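/* Copy one command (32 or 64 bytes, depending on the queue's mode) into
 * the ring slot at host_write_index.
 */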
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

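/* Reserve the next ring slot and copy the command into it. Returns
 * IQ_SEND_OK normally, IQ_SEND_STOP when the queue is almost full after
 * this post, or IQ_SEND_FAILED (index -1) when the queue is full.
 * Called with the queue's post_lock held (see octeon_send_command()).
 */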
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon could fetch any
	 * instructions.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

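/* Register the function used to free buffers of the given request type
 * once Octeon has fetched them (see lio_process_iq_request_list()).
 */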
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

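/* Record the buffer posted at ring index idx so that it can be reclaimed
 * after Octeon fetches the command.
 */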
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc, so add sc to the ordered soft
				 * command response list.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					[OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

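/* Reclaim entries that Octeon has already fetched from the instruction
 * queue, processing at most napi_budget entries when called on behalf of
 * NAPI (napi_budget == 0 means no limit). Returns 1 if the queue was
 * fully flushed (tx done), 0 if the budget ran out first; if a flush is
 * already running on this queue, returns 1 immediately.
 */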
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* Return immediately if no work is pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 1, 0);

	lio_enable_irq(NULL, iq);
}

/* Called at regular intervals from the check_db workqueue to check the
 * instruction queue for commands to be posted and for commands that were
 * fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

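/* Post a single command to the given instruction queue under post_lock,
 * account for it in the queue stats, and ring the doorbell if force_db
 * is set. Returns the IQ_SEND_* status from __post_command2().
 */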
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

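/* Fill in the header fields of a soft command: CN23XX PF devices use the
 * cmd3 format (ih3/pki_ih3), other chips the cmd2 format (ih2). The
 * opcode/subcode pair and the ossp words carry the request-specific
 * parameters; a non-zero sc->rdatasize sets up the response pointer
 * fields (rdp/rflag).
 */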
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

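/* Attach the DMA addresses of the data and response buffers to the
 * prepared command, initialize the completion word, and post the command
 * with the doorbell forced. Returns the IQ_SEND_* status.
 */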
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

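/* Pre-allocate MAX_SOFT_COMMAND_BUFFERS DMA buffers for soft commands
 * and place them on the free pool. Returns 0 on success, 1 if an
 * allocation fails (buffers allocated so far are released).
 */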
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

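/* Release every buffer still on the soft-command free pool. Buffers
 * currently checked out by callers are not on the list and are not
 * freed here.
 */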
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

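/* Take one buffer from the soft-command pool and carve it up: the
 * octeon_soft_command header first, then an optional context area, then
 * data and response areas that each start on a 128-byte-aligned offset
 * within the buffer. Returns NULL if the pool is empty.
 */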
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

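/* Return a soft-command buffer to the free pool. */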
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}