/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
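
/* Usage note (added for clarity, not part of the original driver): the macro
 * above simply bumps a per-queue statistics field. For example,
 *
 *	INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
 *
 * expands to
 *
 *	(oct->instr_queue[iq_no]->stats.instr_posted += 1);
 */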

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}
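
/* Illustrative sketch (not from the original source): a caller that wants a
 * 64-entry instruction queue bound to interface "ifidx" might do something
 * like the following. The txpciq encoding shown here is an assumption; the
 * real callers live elsewhere in the driver (e.g. lio_main.c), and num_descs
 * must be a power of 2 as enforced by octeon_init_instr_queue() above.
 *
 *	union oct_txpciq txpciq;
 *
 *	txpciq.u64 = 0;
 *	txpciq.s.q_no = iq_no;
 *	if (octeon_setup_iq(oct, ifidx, q_index, txpciq, 64, netdev))
 *		dev_err(&oct->pci_dev->dev, "IQ %d setup failed\n", iq_no);
 */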

int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
			if (!(oct->io_qmask.iq & (1UL << i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
	       struct octeon_instr_queue *iq,
	       u32 force_db __attribute__((unused)), u8 *cmd)
{
	u32 index = -1;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
		return -1;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return index;
}

static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
		struct octeon_instr_queue *iq,
		u32 force_db __attribute__((unused)), u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}
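
/* Illustrative sketch (not from the original source): the network driver is
 * expected to register, per request type, the routine that releases the
 * buffer once Octeon has fetched the instruction. A hypothetical caller:
 *
 *	static void my_free_netbuf(void *buf)
 *	{
 *		... unmap DMA and free the skb held by buf ...
 *	}
 *
 *	octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET,
 *					my_free_netbuf);
 *
 * lio_process_iq_request_list() below then invokes the registered function
 * when it walks the request list for that reqtype.
 */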

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			irh = (struct octeon_instr_irh *)&sc->cmd.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_bh(&oct->response_list
					     [OCTEON_ORDERED_SC_LIST].lock);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_bh(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback) {
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

static inline void
update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	u32 inst_processed = 0;

	/* Calculate how many commands Octeon has read and move the read index
	 * accordingly.
	 */
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);

	/* Move the NORESPONSE requests to the per-device completion list. */
	if (iq->flush_index != iq->octeon_read_index)
		inst_processed = lio_process_iq_request_list(oct, iq);

	if (inst_processed) {
		atomic_sub(inst_processed, &iq->instr_pending);
		iq->stats.instr_processed += inst_processed;
	}
}

static void
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh)
{
	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		spin_lock_bh(&iq->lock);
		update_iq_indices(oct, iq);
		spin_unlock_bh(&iq->lock);
	}
}

static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;
	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Get the lock and prevent tasklets. This routine gets called from
	 * the poll thread. Instructions can now be posted in tasklet context
	 */
	spin_lock_bh(&iq->lock);
	if (iq->fill_cnt != 0)
		ring_doorbell(oct, iq);

	spin_unlock_bh(&iq->lock);

	/* Flush the instruction queue */
	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 1);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	unsigned long iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
}

int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	spin_lock_bh(&iq->lock);

	st = __post_command2(oct, iq, force_db, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= iq->fill_threshold || force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->lock);

	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 2);

	return st.status;
}
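
/* Illustrative sketch (not from the original source): a transmit path that
 * has already built a 32B/64B command in "cmd" and tracks "buf" (e.g. an skb
 * wrapper) for later completion could post it as follows. IQ_SEND_STOP means
 * the queue is nearly full and the caller should stop feeding it for now.
 *
 *	status = octeon_send_command(oct, iq_no, 0, &cmd, buf,
 *				     datasize, REQTYPE_NORESP_NET);
 *	if (status == IQ_SEND_FAILED)
 *		... drop or requeue buf ...
 *	else if (status == IQ_SEND_STOP)
 *		... stop the corresponding netdev tx queue ...
 */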

void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	BUG_ON(opcode > 15);
	BUG_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	ih->tagtype = ATOMIC_TAG;
	ih->tag = LIO_CONTROL;
	ih->raw = 1;
	ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

	if (sc->datasize) {
		ih->dlengsz = sc->datasize;
		ih->rs = 1;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.ossp[0] = ossp0;
	sc->cmd.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
		rdp->pcie_port = oct->pcie_port;
		rdp->rlen = sc->rdatasize;

		irh->rflag = 1;
		irh->len = 4;
		ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
	} else {
		irh->rflag = 0;
		irh->len = 2;
		ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
	}

	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
		sc->iq_no++;
}

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih *ih;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	if (ih->dlengsz) {
		BUG_ON(!sc->dmadptr);
		sc->cmd.dptr = sc->dmadptr;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.irh;
	if (irh->rflag) {
		BUG_ON(!sc->dmarptr);
		BUG_ON(!sc->status_word);
		*sc->status_word = COMPLETION_WORD_INIT;

		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;

		sc->cmd.rptr = sc->dmarptr;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				   (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
}
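
/* Illustrative sketch (not from the original source): the usual soft-command
 * life cycle, as implied by the helpers in this file, is roughly the
 * following; "my_done_cb" and "my_ctx" are hypothetical names.
 *
 *	sc = octeon_alloc_soft_command(oct, datasize, rdatasize, ctxsize);
 *	if (!sc)
 *		return -ENOMEM;
 *
 *	octeon_prepare_soft_command(oct, sc, opcode, subcode,
 *				    irh_ossp, ossp0, ossp1);
 *	sc->callback = my_done_cb;
 *	sc->callback_arg = my_ctx;
 *	sc->wait_time = 1000;
 *
 *	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
 *		octeon_free_soft_command(oct, sc);
 *
 * When irh->rflag is set, completion (and the eventual
 * octeon_free_soft_command()) is driven through the ordered response list
 * handled by lio_process_ordered_list().
 */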

int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc)
			return 1;

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock(&oct->sc_buf_pool.lock);

	return 0;
}

struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	BUG_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		BUG_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}
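
/* Layout note (added for clarity, not part of the original driver): with the
 * offset arithmetic above, a single SOFT_COMMAND_BUFFER_SIZE DMA buffer is
 * carved up roughly as
 *
 *	[ struct octeon_soft_command | ctx ...               | pad ]
 *	[ data  (offset rounded up to a 128-byte multiple)   | pad ]
 *	[ rdata (offset rounded up to a 128-byte multiple),
 *	  with status_word occupying its last 8 bytes             ]
 *
 * where the rounding is done via (offset + 127) & 0xffffff80.
 */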

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock(&oct->sc_buf_pool.lock);
}