/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

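/* Allocate a command work entry.  Requests that carry a completion callback
 * are allocated with GFP_ATOMIC; synchronous requests use GFP_KERNEL.
 */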
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->out = out;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

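/* Hand out command tokens from a cyclic counter, mapped into the range
 * 1..255; zero is never used as a token.
 */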
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	token = cmd->token++ % 255 + 1;
	spin_unlock(&cmd->token_lock);

	return token;
}

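/* Reserve a free command entry index from the bitmask of regular (non page
 * queue) slots, or return -ENOMEM if every slot is currently in use.
 */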
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

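/* Command integrity is protected by simple XOR-8 checksums: the signature
 * byte is chosen so that the protected descriptor or mailbox block XORs to
 * 0xff.  xor8_buf() is the primitive used both to compute and to verify
 * these signatures.
 */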
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
{
	block->token = token;
	block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
	block->sig = ~xor8_buf(block, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token);
	calc_chain_sig(ent->out, ent->token);
}

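/* In polling mode, busy-wait (sleeping between reads) until firmware clears
 * the ownership bit of the command descriptor, or until the command timeout
 * plus a one second grace period expires.
 */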
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}


static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_ENABLE_HCA:
		return "MLX5_CMD_OP_ENABLE_HCA";

	case MLX5_CMD_OP_DISABLE_HCA:
		return "MLX5_CMD_OP_DISABLE_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_RTS2SQD_QP:
		return "RTS2SQD_QP";

	case MLX5_CMD_OP_SQD2RTS_QP:
		return "SQD2RTS_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_CONF_SQP:
		return "CONF_SQP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_SUSPEND_QP:
		return "SUSPEND_QP";

	case MLX5_CMD_OP_UNSUSPEND_QP:
		return "UNSUSPEND_QP";

	case MLX5_CMD_OP_SQD2SQD_QP:
		return "SQD2SQD_QP";

	case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
		return "ALLOC_QP_COUNTER_SET";

	case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
		return "DEALLOC_QP_COUNTER_SET";

	case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
		return "QUERY_QP_COUNTER_SET";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_QUERY_PSV:
		return "QUERY_PSV";

	case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
		return "QUERY_SIG_RULE_TABLE";

	case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
		return "QUERY_BLOCK_SIZE_TABLE";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "MLX5_CMD_OP_ACCESS_REG";

	default: return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	int offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

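/* Work handler that actually submits a command to firmware: reserve an
 * entry, build the hardware descriptor (inline data, mailbox pointers,
 * token and signatures) and ring the doorbell.  In polling mode the
 * completion is also collected here by calling mlx5_cmd_comp_handler()
 * directly.
 */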
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	if (!cmd->checksum_disabled)
		set_signature(ent);
	dump_command(dev, ent, 1);
	ktime_get_ts(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	mmiowb();
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

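/* Wait for a synchronous command to complete.  In events mode a timeout is
 * enforced here; in polling mode the work handler already bounded the wait
 * via poll_timeout(), so simply block until the completion is signalled.
 */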
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
		      deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		t1 = timespec_to_ktime(ent->ts1);
		t2 = timespec_to_ktime(ent->ts2);
		delta = ktime_sub(t2, t1);
		ds = ktime_to_ns(delta);
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}


static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		if (xor8_buf(block, sizeof(*block)) != 0xff)
			return -EINVAL;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

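/* Allocate a command message: the head of the message lives inline in
 * msg->first.data, and anything beyond that is carried in a chain of
 * mailbox blocks of MLX5_CMD_DATA_BLOCK_SIZE bytes, each holding its block
 * number and the DMA address of the next block.
 */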
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

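/* Switch command completions between polling and event driven mode.  All
 * regular entries and the page queue entry are taken, and the workqueue is
 * flushed, so the mode only changes while no command is in flight.
 */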
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

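/* Completion handler, called from the polling path above and, in events
 * mode, from EQ handling.  "vector" is a bitmap of completed entry indices:
 * for each set bit, copy out the inline output, verify signatures if
 * enabled, record the delivery status, and either run the caller's callback
 * or complete() the waiter before releasing the entry and its semaphore.
 */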
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ktime_get_ts(&ent->ts2);
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

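/* Pick an input message from the pre-allocated medium/large caches when the
 * request fits one of them; otherwise fall back to allocating a fresh
 * message of exactly the requested size.
 */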
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);

	return msg;
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	if (msg->cache) {
		spin_lock(&msg->cache->lock);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock(&msg->cache->lock);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

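/* Main entry point for issuing a command synchronously: copy the caller's
 * buffer into a command message, invoke the command (MANAGE_PAGES goes
 * through the dedicated page queue), translate a non-zero command status
 * into an error code, and copy the output back.
 */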
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_exec);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

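/* Set up the command interface: check the command interface revision
 * against the firmware, allocate the DMA-able command queue page and the
 * mailbox pool, read the queue geometry from the init segment, write the
 * queue address back to the device, and create the message cache, the
 * command workqueue and the debugfs files.  Polling mode is used until
 * events are enabled.
 */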
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}
	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	switch (hdr->status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}