blob: 8675d26a678ba39cce44359453681f65e654022c [file] [log] [blame]
Eli Cohene126ba92013-07-07 17:25:49 +03001/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/errno.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/delay.h>
41#include <linux/random.h>
42#include <linux/io-mapping.h>
43#include <linux/mlx5/driver.h>
44#include <linux/debugfs.h>
45
46#include "mlx5_core.h"
47
enum {
	/* Command interface revision this driver implements */
	CMD_IF_REV = 5,
};
51
/* Command completion modes: poll the descriptor ownership bit, or wait
 * for a completion event on the EQ.
 */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};
56
/* Sizing of the pre-allocated command message caches (see alloc_msg()) */
enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	/* big enough for a MANAGE_PAGES command covering 2GB worth of
	 * 8-byte page addresses, plus the 16 inline bytes and one block
	 */
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
64
/* Delivery status codes: how the command transport fared, reported in
 * the command layout's status field (see deliv_status_to_str()).
 */
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
78
/* Firmware command return status codes (values are firmware-defined,
 * hence not contiguous).
 */
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};
97
98static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
99 struct mlx5_cmd_msg *in,
100 struct mlx5_cmd_msg *out,
Eli Cohen746b5582013-10-23 09:53:14 +0300101 void *uout, int uout_size,
Eli Cohene126ba92013-07-07 17:25:49 +0300102 mlx5_cmd_cbk_t cbk,
103 void *context, int page_queue)
104{
105 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
106 struct mlx5_cmd_work_ent *ent;
107
108 ent = kzalloc(sizeof(*ent), alloc_flags);
109 if (!ent)
110 return ERR_PTR(-ENOMEM);
111
112 ent->in = in;
113 ent->out = out;
Eli Cohen746b5582013-10-23 09:53:14 +0300114 ent->uout = uout;
115 ent->uout_size = uout_size;
Eli Cohene126ba92013-07-07 17:25:49 +0300116 ent->callback = cbk;
117 ent->context = context;
118 ent->cmd = cmd;
119 ent->page_queue = page_queue;
120
121 return ent;
122}
123
124static u8 alloc_token(struct mlx5_cmd *cmd)
125{
126 u8 token;
127
128 spin_lock(&cmd->token_lock);
129 token = cmd->token++ % 255 + 1;
130 spin_unlock(&cmd->token_lock);
131
132 return token;
133}
134
135static int alloc_ent(struct mlx5_cmd *cmd)
136{
137 unsigned long flags;
138 int ret;
139
140 spin_lock_irqsave(&cmd->alloc_lock, flags);
141 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
142 if (ret < cmd->max_reg_cmds)
143 clear_bit(ret, &cmd->bitmask);
144 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
145
146 return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
147}
148
149static void free_ent(struct mlx5_cmd *cmd, int idx)
150{
151 unsigned long flags;
152
153 spin_lock_irqsave(&cmd->alloc_lock, flags);
154 set_bit(idx, &cmd->bitmask);
155 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
156}
157
158static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
159{
160 return cmd->cmd_buf + (idx << cmd->log_stride);
161}
162
163static u8 xor8_buf(void *buf, int len)
164{
165 u8 *ptr = buf;
166 u8 sum = 0;
167 int i;
168
169 for (i = 0; i < len; i++)
170 sum ^= ptr[i];
171
172 return sum;
173}
174
175static int verify_block_sig(struct mlx5_cmd_prot_block *block)
176{
177 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
178 return -EINVAL;
179
180 if (xor8_buf(block, sizeof(*block)) != 0xff)
181 return -EINVAL;
182
183 return 0;
184}
185
Eli Cohenc1868b82013-09-11 16:35:25 +0300186static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
187 int csum)
Eli Cohene126ba92013-07-07 17:25:49 +0300188{
189 block->token = token;
Eli Cohenc1868b82013-09-11 16:35:25 +0300190 if (csum) {
191 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
192 sizeof(block->data) - 2);
193 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
194 }
Eli Cohene126ba92013-07-07 17:25:49 +0300195}
196
Eli Cohenc1868b82013-09-11 16:35:25 +0300197static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
Eli Cohene126ba92013-07-07 17:25:49 +0300198{
199 struct mlx5_cmd_mailbox *next = msg->next;
200
201 while (next) {
Eli Cohenc1868b82013-09-11 16:35:25 +0300202 calc_block_sig(next->buf, token, csum);
Eli Cohene126ba92013-07-07 17:25:49 +0300203 next = next->next;
204 }
205}
206
/* Sign the command layout and every mailbox block of both the input and
 * output chains with the entry's token.  Block checksums are only
 * computed when @csum is set (i.e. checksumming is enabled).
 */
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
213
214static void poll_timeout(struct mlx5_cmd_work_ent *ent)
215{
216 unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
217 u8 own;
218
219 do {
220 own = ent->lay->status_own;
221 if (!(own & CMD_OWNER_HW)) {
222 ent->ret = 0;
223 return;
224 }
225 usleep_range(5000, 10000);
226 } while (time_before(jiffies, poll_end));
227
228 ent->ret = -ETIMEDOUT;
229}
230
/* Release a command work entry allocated by alloc_cmd() */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
235
236
237static int verify_signature(struct mlx5_cmd_work_ent *ent)
238{
239 struct mlx5_cmd_mailbox *next = ent->out->next;
240 int err;
241 u8 sig;
242
243 sig = xor8_buf(ent->lay, sizeof(*ent->lay));
244 if (sig != 0xff)
245 return -EINVAL;
246
247 while (next) {
248 err = verify_block_sig(next->buf);
249 if (err)
250 return err;
251
252 next = next->next;
253 }
254
255 return 0;
256}
257
258static void dump_buf(void *buf, int size, int data_only, int offset)
259{
260 __be32 *p = buf;
261 int i;
262
263 for (i = 0; i < size; i += 16) {
264 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
265 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
266 be32_to_cpu(p[3]));
267 p += 4;
268 offset += 16;
269 }
270 if (!data_only)
271 pr_debug("\n");
272}
273
274const char *mlx5_command_str(int command)
275{
276 switch (command) {
277 case MLX5_CMD_OP_QUERY_HCA_CAP:
278 return "QUERY_HCA_CAP";
279
280 case MLX5_CMD_OP_SET_HCA_CAP:
281 return "SET_HCA_CAP";
282
283 case MLX5_CMD_OP_QUERY_ADAPTER:
284 return "QUERY_ADAPTER";
285
286 case MLX5_CMD_OP_INIT_HCA:
287 return "INIT_HCA";
288
289 case MLX5_CMD_OP_TEARDOWN_HCA:
290 return "TEARDOWN_HCA";
291
Eli Cohencd23b142013-07-18 15:31:08 +0300292 case MLX5_CMD_OP_ENABLE_HCA:
293 return "MLX5_CMD_OP_ENABLE_HCA";
294
295 case MLX5_CMD_OP_DISABLE_HCA:
296 return "MLX5_CMD_OP_DISABLE_HCA";
297
Eli Cohene126ba92013-07-07 17:25:49 +0300298 case MLX5_CMD_OP_QUERY_PAGES:
299 return "QUERY_PAGES";
300
301 case MLX5_CMD_OP_MANAGE_PAGES:
302 return "MANAGE_PAGES";
303
304 case MLX5_CMD_OP_CREATE_MKEY:
305 return "CREATE_MKEY";
306
307 case MLX5_CMD_OP_QUERY_MKEY:
308 return "QUERY_MKEY";
309
310 case MLX5_CMD_OP_DESTROY_MKEY:
311 return "DESTROY_MKEY";
312
313 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
314 return "QUERY_SPECIAL_CONTEXTS";
315
316 case MLX5_CMD_OP_CREATE_EQ:
317 return "CREATE_EQ";
318
319 case MLX5_CMD_OP_DESTROY_EQ:
320 return "DESTROY_EQ";
321
322 case MLX5_CMD_OP_QUERY_EQ:
323 return "QUERY_EQ";
324
325 case MLX5_CMD_OP_CREATE_CQ:
326 return "CREATE_CQ";
327
328 case MLX5_CMD_OP_DESTROY_CQ:
329 return "DESTROY_CQ";
330
331 case MLX5_CMD_OP_QUERY_CQ:
332 return "QUERY_CQ";
333
334 case MLX5_CMD_OP_MODIFY_CQ:
335 return "MODIFY_CQ";
336
337 case MLX5_CMD_OP_CREATE_QP:
338 return "CREATE_QP";
339
340 case MLX5_CMD_OP_DESTROY_QP:
341 return "DESTROY_QP";
342
343 case MLX5_CMD_OP_RST2INIT_QP:
344 return "RST2INIT_QP";
345
346 case MLX5_CMD_OP_INIT2RTR_QP:
347 return "INIT2RTR_QP";
348
349 case MLX5_CMD_OP_RTR2RTS_QP:
350 return "RTR2RTS_QP";
351
352 case MLX5_CMD_OP_RTS2RTS_QP:
353 return "RTS2RTS_QP";
354
355 case MLX5_CMD_OP_SQERR2RTS_QP:
356 return "SQERR2RTS_QP";
357
358 case MLX5_CMD_OP_2ERR_QP:
359 return "2ERR_QP";
360
361 case MLX5_CMD_OP_RTS2SQD_QP:
362 return "RTS2SQD_QP";
363
364 case MLX5_CMD_OP_SQD2RTS_QP:
365 return "SQD2RTS_QP";
366
367 case MLX5_CMD_OP_2RST_QP:
368 return "2RST_QP";
369
370 case MLX5_CMD_OP_QUERY_QP:
371 return "QUERY_QP";
372
373 case MLX5_CMD_OP_CONF_SQP:
374 return "CONF_SQP";
375
376 case MLX5_CMD_OP_MAD_IFC:
377 return "MAD_IFC";
378
379 case MLX5_CMD_OP_INIT2INIT_QP:
380 return "INIT2INIT_QP";
381
382 case MLX5_CMD_OP_SUSPEND_QP:
383 return "SUSPEND_QP";
384
385 case MLX5_CMD_OP_UNSUSPEND_QP:
386 return "UNSUSPEND_QP";
387
388 case MLX5_CMD_OP_SQD2SQD_QP:
389 return "SQD2SQD_QP";
390
391 case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
392 return "ALLOC_QP_COUNTER_SET";
393
394 case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
395 return "DEALLOC_QP_COUNTER_SET";
396
397 case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
398 return "QUERY_QP_COUNTER_SET";
399
400 case MLX5_CMD_OP_CREATE_PSV:
401 return "CREATE_PSV";
402
403 case MLX5_CMD_OP_DESTROY_PSV:
404 return "DESTROY_PSV";
405
406 case MLX5_CMD_OP_QUERY_PSV:
407 return "QUERY_PSV";
408
409 case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
410 return "QUERY_SIG_RULE_TABLE";
411
412 case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
413 return "QUERY_BLOCK_SIZE_TABLE";
414
415 case MLX5_CMD_OP_CREATE_SRQ:
416 return "CREATE_SRQ";
417
418 case MLX5_CMD_OP_DESTROY_SRQ:
419 return "DESTROY_SRQ";
420
421 case MLX5_CMD_OP_QUERY_SRQ:
422 return "QUERY_SRQ";
423
424 case MLX5_CMD_OP_ARM_RQ:
425 return "ARM_RQ";
426
427 case MLX5_CMD_OP_RESIZE_SRQ:
428 return "RESIZE_SRQ";
429
430 case MLX5_CMD_OP_ALLOC_PD:
431 return "ALLOC_PD";
432
433 case MLX5_CMD_OP_DEALLOC_PD:
434 return "DEALLOC_PD";
435
436 case MLX5_CMD_OP_ALLOC_UAR:
437 return "ALLOC_UAR";
438
439 case MLX5_CMD_OP_DEALLOC_UAR:
440 return "DEALLOC_UAR";
441
442 case MLX5_CMD_OP_ATTACH_TO_MCG:
443 return "ATTACH_TO_MCG";
444
445 case MLX5_CMD_OP_DETACH_FROM_MCG:
446 return "DETACH_FROM_MCG";
447
448 case MLX5_CMD_OP_ALLOC_XRCD:
449 return "ALLOC_XRCD";
450
451 case MLX5_CMD_OP_DEALLOC_XRCD:
452 return "DEALLOC_XRCD";
453
454 case MLX5_CMD_OP_ACCESS_REG:
455 return "MLX5_CMD_OP_ACCESS_REG";
456
457 default: return "unknown command opcode";
458 }
459}
460
/* Pretty-print a command's input or output message to the debug log.
 *
 * @input selects ent->in vs. ent->out.  When the MLX5_CMD_DATA debug
 * mask bit is set only the payload data is dumped; otherwise the full
 * command layout and each protection block (headers included) are
 * dumped as well.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	int offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			/* inline data embedded directly in the layout */
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		/* whole layout including header/control fields */
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	/* walk the mailbox chain until the message length is covered */
	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
512
/* Workqueue handler that submits one command to firmware.
 *
 * Claims a command slot (page-queue commands use a reserved slot and
 * their own semaphore so page requests cannot be starved), builds the
 * hardware command layout, signs it and rings the doorbell.  In polling
 * mode it also waits for completion and invokes the completion handler
 * synchronously.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		/* the slot past the regular range is reserved for pages */
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	/* opcode is the top 16 bits of the first inline dword; kept for stats */
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ktime_get_ts(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	mmiowb();
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
566
567static const char *deliv_status_to_str(u8 status)
568{
569 switch (status) {
570 case MLX5_CMD_DELIVERY_STAT_OK:
571 return "no errors";
572 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
573 return "signature error";
574 case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
575 return "token error";
576 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
577 return "bad block number";
578 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
579 return "output pointer not aligned to block size";
580 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
581 return "input pointer not aligned to block size";
582 case MLX5_CMD_DELIVERY_STAT_FW_ERR:
583 return "firmware internal error";
584 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
585 return "command input length error";
586 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
587 return "command ouput length error";
588 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
589 return "reserved fields not cleared";
590 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
591 return "bad command descriptor type";
592 default:
593 return "unknown status code";
594 }
595}
596
597static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
598{
599 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
600
601 return be16_to_cpu(hdr->opcode);
602}
603
/* Wait for a submitted command to complete.
 *
 * Polling mode: cmd_work_handler() completes the entry synchronously,
 * so an unbounded wait is safe.  Event mode: bound the wait by the
 * command timeout.  Returns 0 or -ETIMEDOUT; on timeout the caller
 * deliberately leaks the entry because firmware may still write to it.
 */
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
		      deliv_status_to_str(ent->status), ent->status);

	return err;
}
629
630/* Notes:
631 * 1. Callback functions may not sleep
632 * 2. page queue commands do not support asynchrous completion
633 */
/* Build a command entry for (in, out), submit it and — for synchronous
 * calls — wait for completion, record execution-time stats and return
 * the delivery status through *status.  Asynchronous (callback)
 * completions are finished in mlx5_cmd_comp_handler().
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	/* page queue commands do not support async completion (see note) */
	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* run inline: page requests must be able to make progress
		 * even when the workqueue is backed up
		 */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			/* entry is intentionally leaked: firmware may still
			 * own it (see wait_func() warning)
			 */
			goto out;

		t1 = timespec_to_ktime(ent->ts1);
		t2 = timespec_to_ktime(ent->ts2);
		delta = ktime_sub(t2, t1);
		ds = ktime_to_ns(delta);
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}
698
699static ssize_t dbg_write(struct file *filp, const char __user *buf,
700 size_t count, loff_t *pos)
701{
702 struct mlx5_core_dev *dev = filp->private_data;
703 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
704 char lbuf[3];
705 int err;
706
707 if (!dbg->in_msg || !dbg->out_msg)
708 return -ENOMEM;
709
710 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
Dan Carpenter5e631a02013-07-10 13:58:59 +0300711 return -EFAULT;
Eli Cohene126ba92013-07-07 17:25:49 +0300712
713 lbuf[sizeof(lbuf) - 1] = 0;
714
715 if (strcmp(lbuf, "go"))
716 return -EINVAL;
717
718 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
719
720 return err ? err : count;
721}
722
723
/* debugfs "run" file: write "go" to execute the staged command */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
729
730static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
731{
732 struct mlx5_cmd_prot_block *block;
733 struct mlx5_cmd_mailbox *next;
734 int copy;
735
736 if (!to || !from)
737 return -ENOMEM;
738
739 copy = min_t(int, size, sizeof(to->first.data));
740 memcpy(to->first.data, from, copy);
741 size -= copy;
742 from += copy;
743
744 next = to->next;
745 while (size) {
746 if (!next) {
747 /* this is a BUG */
748 return -ENOMEM;
749 }
750
751 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
752 block = next->buf;
753 memcpy(block->data, from, copy);
754 from += copy;
755 size -= copy;
756 next = next->next;
757 }
758
759 return 0;
760}
761
762static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
763{
764 struct mlx5_cmd_prot_block *block;
765 struct mlx5_cmd_mailbox *next;
766 int copy;
767
768 if (!to || !from)
769 return -ENOMEM;
770
771 copy = min_t(int, size, sizeof(from->first.data));
772 memcpy(to, from->first.data, copy);
773 size -= copy;
774 to += copy;
775
776 next = from->next;
777 while (size) {
778 if (!next) {
779 /* this is a BUG */
780 return -ENOMEM;
781 }
782
783 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
784 block = next->buf;
Eli Cohene126ba92013-07-07 17:25:49 +0300785
786 memcpy(to, block->data, copy);
787 to += copy;
788 size -= copy;
789 next = next->next;
790 }
791
792 return 0;
793}
794
795static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
796 gfp_t flags)
797{
798 struct mlx5_cmd_mailbox *mailbox;
799
800 mailbox = kmalloc(sizeof(*mailbox), flags);
801 if (!mailbox)
802 return ERR_PTR(-ENOMEM);
803
804 mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
805 &mailbox->dma);
806 if (!mailbox->buf) {
807 mlx5_core_dbg(dev, "failed allocation\n");
808 kfree(mailbox);
809 return ERR_PTR(-ENOMEM);
810 }
811 memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
812 mailbox->next = NULL;
813
814 return mailbox;
815}
816
/* Return a mailbox's DMA buffer to the command pool and free the
 * mailbox metadata.
 */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
823
/* Allocate a command message that can hold @size bytes.
 *
 * The first min(size, sizeof(msg->first.data)) bytes live inline in the
 * message; the remainder is carved into MLX5_CMD_DATA_BLOCK_SIZE
 * mailbox blocks.  Blocks are built in reverse so each new block can
 * DMA-point at the previously built one, while block_num still counts
 * upward from 0 in chain order.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	/* bytes that do not fit inline, rounded up to whole blocks */
	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	/* unwind the partially built chain */
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
870
871static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
872 struct mlx5_cmd_msg *msg)
873{
874 struct mlx5_cmd_mailbox *head = msg->next;
875 struct mlx5_cmd_mailbox *next;
876
877 while (head) {
878 next = head->next;
879 free_cmd_box(dev, head);
880 head = next;
881 }
882 kfree(msg);
883}
884
885static ssize_t data_write(struct file *filp, const char __user *buf,
886 size_t count, loff_t *pos)
887{
888 struct mlx5_core_dev *dev = filp->private_data;
889 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
890 void *ptr;
891 int err;
892
893 if (*pos != 0)
894 return -EINVAL;
895
896 kfree(dbg->in_msg);
897 dbg->in_msg = NULL;
898 dbg->inlen = 0;
899
900 ptr = kzalloc(count, GFP_KERNEL);
901 if (!ptr)
902 return -ENOMEM;
903
904 if (copy_from_user(ptr, buf, count)) {
Dan Carpenter5e631a02013-07-10 13:58:59 +0300905 err = -EFAULT;
Eli Cohene126ba92013-07-07 17:25:49 +0300906 goto out;
907 }
908 dbg->in_msg = ptr;
909 dbg->inlen = count;
910
911 *pos = count;
912
913 return count;
914
915out:
916 kfree(ptr);
917 return err;
918}
919
920static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
921 loff_t *pos)
922{
923 struct mlx5_core_dev *dev = filp->private_data;
924 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
925 int copy;
926
927 if (*pos)
928 return 0;
929
930 if (!dbg->out_msg)
931 return -ENOMEM;
932
933 copy = min_t(int, count, dbg->outlen);
934 if (copy_to_user(buf, dbg->out_msg, copy))
Dan Carpenter5e631a02013-07-10 13:58:59 +0300935 return -EFAULT;
Eli Cohene126ba92013-07-07 17:25:49 +0300936
937 *pos += copy;
938
939 return copy;
940}
941
/* debugfs "in" / "out" files: stage command input, read command output */
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
948
949static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
950 loff_t *pos)
951{
952 struct mlx5_core_dev *dev = filp->private_data;
953 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
954 char outlen[8];
955 int err;
956
957 if (*pos)
958 return 0;
959
960 err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
961 if (err < 0)
962 return err;
963
964 if (copy_to_user(buf, &outlen, err))
Dan Carpenter5e631a02013-07-10 13:58:59 +0300965 return -EFAULT;
Eli Cohene126ba92013-07-07 17:25:49 +0300966
967 *pos += err;
968
969 return err;
970}
971
972static ssize_t outlen_write(struct file *filp, const char __user *buf,
973 size_t count, loff_t *pos)
974{
975 struct mlx5_core_dev *dev = filp->private_data;
976 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
977 char outlen_str[8];
978 int outlen;
979 void *ptr;
980 int err;
981
982 if (*pos != 0 || count > 6)
983 return -EINVAL;
984
985 kfree(dbg->out_msg);
986 dbg->out_msg = NULL;
987 dbg->outlen = 0;
988
989 if (copy_from_user(outlen_str, buf, count))
Dan Carpenter5e631a02013-07-10 13:58:59 +0300990 return -EFAULT;
Eli Cohene126ba92013-07-07 17:25:49 +0300991
992 outlen_str[7] = 0;
993
994 err = sscanf(outlen_str, "%d", &outlen);
995 if (err < 0)
996 return err;
997
998 ptr = kzalloc(outlen, GFP_KERNEL);
999 if (!ptr)
1000 return -ENOMEM;
1001
1002 dbg->out_msg = ptr;
1003 dbg->outlen = outlen;
1004
1005 *pos = count;
1006
1007 return count;
1008}
1009
/* debugfs "out_len" file: get/set the expected command output length */
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
1016
1017static void set_wqname(struct mlx5_core_dev *dev)
1018{
1019 struct mlx5_cmd *cmd = &dev->cmd;
1020
1021 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1022 dev_name(&dev->pdev->dev));
1023}
1024
1025static void clean_debug_files(struct mlx5_core_dev *dev)
1026{
1027 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1028
1029 if (!mlx5_debugfs_root)
1030 return;
1031
1032 mlx5_cmdif_debugfs_cleanup(dev);
1033 debugfs_remove_recursive(dbg->dbg_root);
1034}
1035
/* Create the per-device command-interface debugfs files ("in", "out",
 * "out_len", "status", "run") under a "cmd" directory.  Returns 0 when
 * debugfs is not available (nothing to create), -ENOMEM on failure.
 */
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	/* removes the whole "cmd" directory including partial contents */
	clean_debug_files(dev);
	return err;
}
1080
1081void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1082{
1083 struct mlx5_cmd *cmd = &dev->cmd;
1084 int i;
1085
1086 for (i = 0; i < cmd->max_reg_cmds; i++)
1087 down(&cmd->sem);
1088
1089 down(&cmd->pages_sem);
1090
1091 flush_workqueue(cmd->wq);
1092
1093 cmd->mode = CMD_MODE_EVENTS;
1094
1095 up(&cmd->pages_sem);
1096 for (i = 0; i < cmd->max_reg_cmds; i++)
1097 up(&cmd->sem);
1098}
1099
1100void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1101{
1102 struct mlx5_cmd *cmd = &dev->cmd;
1103 int i;
1104
1105 for (i = 0; i < cmd->max_reg_cmds; i++)
1106 down(&cmd->sem);
1107
1108 down(&cmd->pages_sem);
1109
1110 flush_workqueue(cmd->wq);
1111 cmd->mode = CMD_MODE_POLLING;
1112
1113 up(&cmd->pages_sem);
1114 for (i = 0; i < cmd->max_reg_cmds; i++)
1115 up(&cmd->sem);
1116}
1117
Eli Cohen746b5582013-10-23 09:53:14 +03001118static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1119{
1120 unsigned long flags;
1121
1122 if (msg->cache) {
1123 spin_lock_irqsave(&msg->cache->lock, flags);
1124 list_add_tail(&msg->list, &msg->cache->head);
1125 spin_unlock_irqrestore(&msg->cache->lock, flags);
1126 } else {
1127 mlx5_free_cmd_msg(dev, msg);
1128 }
1129}
1130
/* Command completion handler — called from the EQ path in event mode or
 * directly from cmd_work_handler() in polling mode.
 *
 * @vector is a bitmask of completed command slot indices.  For each set
 * bit: copy back the inline output, verify signatures, release the slot
 * and either invoke the async callback (accounting stats and freeing
 * the messages here) or complete() the synchronous waiter.
 */
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	ktime_t t1, t2, delta;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ktime_get_ts(&ent->ts2);
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				/* status is the top 7 bits; bit 0 is ownership */
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				/* async path: account exec time, copy the
				 * result out and free everything before
				 * invoking the callback
				 */
				t1 = timespec_to_ktime(ent->ts1);
				t2 = timespec_to_ktime(ent->ts2);
				delta = ktime_sub(t2, t1);
				ds = ktime_to_ns(delta);
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
1199EXPORT_SYMBOL(mlx5_cmd_comp_handler);
1200
1201static int status_to_err(u8 status)
1202{
1203 return status ? -1 : 0; /* TBD more meaningful codes */
1204}
1205
Eli Cohen746b5582013-10-23 09:53:14 +03001206static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1207 gfp_t gfp)
Eli Cohene126ba92013-07-07 17:25:49 +03001208{
1209 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1210 struct mlx5_cmd *cmd = &dev->cmd;
1211 struct cache_ent *ent = NULL;
1212
1213 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
1214 ent = &cmd->cache.large;
1215 else if (in_size > 16 && in_size <= MED_LIST_SIZE)
1216 ent = &cmd->cache.med;
1217
1218 if (ent) {
Eli Cohen746b5582013-10-23 09:53:14 +03001219 spin_lock_irq(&ent->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001220 if (!list_empty(&ent->head)) {
1221 msg = list_entry(ent->head.next, typeof(*msg), list);
1222 /* For cached lists, we must explicitly state what is
1223 * the real size
1224 */
1225 msg->len = in_size;
1226 list_del(&msg->list);
1227 }
Eli Cohen746b5582013-10-23 09:53:14 +03001228 spin_unlock_irq(&ent->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001229 }
1230
1231 if (IS_ERR(msg))
Eli Cohen746b5582013-10-23 09:53:14 +03001232 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
Eli Cohene126ba92013-07-07 17:25:49 +03001233
1234 return msg;
1235}
1236
Eli Cohene126ba92013-07-07 17:25:49 +03001237static int is_manage_pages(struct mlx5_inbox_hdr *in)
1238{
1239 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1240}
1241
/* Execute a firmware command, synchronously or asynchronously.
 *
 * @dev:      mlx5 device
 * @in:       caller's input mailbox, @in_size bytes
 * @out:      caller's output buffer, @out_size bytes
 * @callback: if non-NULL, the command runs asynchronously and @callback
 *            is invoked from the completion handler with @context
 * @context:  opaque cookie passed to @callback
 *
 * Returns 0 on success or a negative value on failure (allocation,
 * delivery, or firmware status mapped via status_to_err()).
 *
 * Ownership note: on the async path the inb/outb messages are NOT freed
 * here — the completion handler (mlx5_cmd_comp_handler) frees them after
 * running the callback; hence the "if (!callback)" guards below.
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	/* async callers may be in atomic context */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		/* delivered, but firmware reported a non-zero status */
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
Eli Cohen746b5582013-10-23 09:53:14 +03001295
1296int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1297 int out_size)
1298{
1299 return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
1300}
Eli Cohene126ba92013-07-07 17:25:49 +03001301EXPORT_SYMBOL(mlx5_cmd_exec);
1302
Eli Cohen746b5582013-10-23 09:53:14 +03001303int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
1304 void *out, int out_size, mlx5_cmd_cbk_t callback,
1305 void *context)
1306{
1307 return cmd_exec(dev, in, in_size, out, out_size, callback, context);
1308}
1309EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1310
Eli Cohene126ba92013-07-07 17:25:49 +03001311static void destroy_msg_cache(struct mlx5_core_dev *dev)
1312{
1313 struct mlx5_cmd *cmd = &dev->cmd;
1314 struct mlx5_cmd_msg *msg;
1315 struct mlx5_cmd_msg *n;
1316
1317 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
1318 list_del(&msg->list);
1319 mlx5_free_cmd_msg(dev, msg);
1320 }
1321
1322 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
1323 list_del(&msg->list);
1324 mlx5_free_cmd_msg(dev, msg);
1325 }
1326}
1327
1328static int create_msg_cache(struct mlx5_core_dev *dev)
1329{
1330 struct mlx5_cmd *cmd = &dev->cmd;
1331 struct mlx5_cmd_msg *msg;
1332 int err;
1333 int i;
1334
1335 spin_lock_init(&cmd->cache.large.lock);
1336 INIT_LIST_HEAD(&cmd->cache.large.head);
1337 spin_lock_init(&cmd->cache.med.lock);
1338 INIT_LIST_HEAD(&cmd->cache.med.head);
1339
1340 for (i = 0; i < NUM_LONG_LISTS; i++) {
1341 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
1342 if (IS_ERR(msg)) {
1343 err = PTR_ERR(msg);
1344 goto ex_err;
1345 }
1346 msg->cache = &cmd->cache.large;
1347 list_add_tail(&msg->list, &cmd->cache.large.head);
1348 }
1349
1350 for (i = 0; i < NUM_MED_LISTS; i++) {
1351 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
1352 if (IS_ERR(msg)) {
1353 err = PTR_ERR(msg);
1354 goto ex_err;
1355 }
1356 msg->cache = &cmd->cache.med;
1357 list_add_tail(&msg->list, &cmd->cache.med.head);
1358 }
1359
1360 return 0;
1361
1362ex_err:
1363 destroy_msg_cache(dev);
1364 return err;
1365}
1366
/* Initialize the command interface of @dev.
 *
 * Verifies the firmware command-interface revision, allocates the DMA
 * pool and command-queue page, reads the queue geometry from the init
 * segment, programs the queue address into the device, and creates the
 * message cache, workqueue, and debugfs files.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto-cleanup chain below.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	/* driver and firmware must agree on the command interface rev */
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	/* one page backs the command queue descriptors */
	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}
	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	/* queue geometry: low byte encodes log size (high nibble) and
	 * log stride (low nibble)
	 */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	/* the whole queue must fit in the single mapped page */
	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->checksum_disabled = 1;
	/* reserve one slot for the page-queue command */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	/* regular commands share max_reg_cmds slots; page commands get one */
	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	/* hardware requires a 4K-aligned queue address */
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	/* start in polling mode; events mode is enabled later */
	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
1495
/* Tear down the command interface of @dev.
 *
 * Releases everything mlx5_cmd_init() acquired, in the reverse order:
 * debugfs files, workqueue, message cache, DMA mapping, queue page, and
 * the PCI pool. Callers must ensure no commands are in flight.
 */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
1509
1510static const char *cmd_status_str(u8 status)
1511{
1512 switch (status) {
1513 case MLX5_CMD_STAT_OK:
1514 return "OK";
1515 case MLX5_CMD_STAT_INT_ERR:
1516 return "internal error";
1517 case MLX5_CMD_STAT_BAD_OP_ERR:
1518 return "bad operation";
1519 case MLX5_CMD_STAT_BAD_PARAM_ERR:
1520 return "bad parameter";
1521 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
1522 return "bad system state";
1523 case MLX5_CMD_STAT_BAD_RES_ERR:
1524 return "bad resource";
1525 case MLX5_CMD_STAT_RES_BUSY:
1526 return "resource busy";
1527 case MLX5_CMD_STAT_LIM_ERR:
1528 return "limits exceeded";
1529 case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
1530 return "bad resource state";
1531 case MLX5_CMD_STAT_IX_ERR:
1532 return "bad index";
1533 case MLX5_CMD_STAT_NO_RES_ERR:
1534 return "no resources";
1535 case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
1536 return "bad input length";
1537 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
1538 return "bad output length";
1539 case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
1540 return "bad QP state";
1541 case MLX5_CMD_STAT_BAD_PKT_ERR:
1542 return "bad packet (discarded)";
1543 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
1544 return "bad size too many outstanding CQEs";
1545 default:
1546 return "unknown status";
1547 }
1548}
1549
1550int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1551{
1552 if (!hdr->status)
1553 return 0;
1554
1555 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1556 cmd_status_str(hdr->status), hdr->status,
1557 be32_to_cpu(hdr->syndrome));
1558
1559 switch (hdr->status) {
1560 case MLX5_CMD_STAT_OK: return 0;
1561 case MLX5_CMD_STAT_INT_ERR: return -EIO;
1562 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
1563 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
1564 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
1565 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
1566 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
Eli Cohen9c865132013-09-11 16:35:33 +03001567 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
Eli Cohene126ba92013-07-07 17:25:49 +03001568 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
1569 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
1570 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
1571 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
1572 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
1573 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
1574 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
1575 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
1576 default: return -EIO;
1577 }
1578}