/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"

static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

static const struct pci_device_id mlxsw_pci_id_table[] = {
	{0, }
};

static struct dentry *mlxsw_pci_dbg_root;

static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
{
	switch (id->device) {
	default:
		BUG();
	}
}

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
{
	switch (q_type) {
	case MLXSW_PCI_QUEUE_TYPE_SDQ:
		return "sdq";
	case MLXSW_PCI_QUEUE_TYPE_RDQ:
		return "rdq";
	case MLXSW_PCI_QUEUE_TYPE_CQ:
		return "cq";
	case MLXSW_PCI_QUEUE_TYPE_EQ:
		return "eq";
	}
	BUG();
}

#define MLXSW_PCI_QUEUE_TYPE_COUNT 4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct msix_entry msix_entry;
	struct mlxsw_core *core;
	struct {
		u16 num_pages;
		struct mlxsw_pci_mem_item *items;
	} fw_area;
	struct {
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	struct dentry *dbg_dir;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

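/* An element is still owned by the hardware when its owner bit differs from
 * the parity of the current software pass over the ring, derived from
 * consumer_counter & count (count is a power of two). The expected bit value
 * flips on every wrap-around, so descriptors never need to be re-initialized
 * between passes.
 */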
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
					 u32 (*get_elem_owner_func)(char *))
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = get_elem_owner_func(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

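/* Doorbells are written at mlxsw_pci->doorbell_offset (queried from firmware
 * during init) plus a per-queue-type offset from the tables above, with one
 * record per queue number.
 */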
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		if (net_ratelimit())
			dev_err(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this RDQ, offset by
	 * MLXSW_PCI_SDQS_COUNT as the lower CQ numbers are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_cq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
			   q->u.cq.comp_rdq_count, q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

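/* Receive path: detach the skb from the RDQ element, hand it to the core
 * together with the system port and trap ID parsed from the CQE, and
 * immediately refill the element with a fresh skb so the descriptor can be
 * returned to the hardware.
 */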
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	/* We do not support LAG yet. */
	if (mlxsw_pci_cqe_lag_get(cqe))
		goto drop;

	rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe));
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

put_new_skb:
	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err && net_ratelimit())
		dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return;

drop:
	dev_kfree_skb_any(skb);
	goto put_new_skb;
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
}

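/* CQ processing is budgeted to half of the ring per tasklet run so a busy
 * queue cannot monopolize the CPU; the consumed CQEs are acknowledged and
 * the CQ is re-armed in one shot at the end.
 */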
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, cqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, cqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
}

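/* The EQ tasklet dispatches command-interface completions directly and
 * aggregates completion events into a CQN bitmap, scheduling the per-CQ
 * tasklets only once after the budget of EQEs has been processed.
 */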
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

		switch (event_type) {
		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	int (*dbg_read)(struct seq_file *s, void *data);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init = mlxsw_pci_sdq_init,
	.fini = mlxsw_pci_sdq_fini,
	.dbg_read = mlxsw_pci_sdq_dbg_read,
	.elem_count = MLXSW_PCI_WQE_COUNT,
	.elem_size = MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init = mlxsw_pci_rdq_init,
	.fini = mlxsw_pci_rdq_fini,
	.dbg_read = mlxsw_pci_rdq_dbg_read,
	.elem_count = MLXSW_PCI_WQE_COUNT,
	.elem_size = MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_CQ,
	.init = mlxsw_pci_cq_init,
	.fini = mlxsw_pci_cq_fini,
	.tasklet = mlxsw_pci_cq_tasklet,
	.dbg_read = mlxsw_pci_cq_dbg_read,
	.elem_count = MLXSW_PCI_CQE_COUNT,
	.elem_size = MLXSW_PCI_CQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_EQ,
	.init = mlxsw_pci_eq_init,
	.fini = mlxsw_pci_eq_fini,
	.tasklet = mlxsw_pci_eq_tasklet,
	.dbg_read = mlxsw_pci_eq_dbg_read,
	.elem_count = MLXSW_PCI_EQE_COUNT,
	.elem_size = MLXSW_PCI_EQE_SIZE
};

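/* Common bring-up for all queue types: allocate the DMA-coherent ring and
 * the per-element bookkeeping array, then let the type-specific init pass
 * the page addresses to the device via the SW2HW_* command mailbox.
 */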
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	spin_lock_init(&q->lock);
	q->num = q_num;
	q->count = q_ops->elem_count;
	q->elem_size = q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize the elem_info array with pointers into the DMA-mapped
	 * element memory for easy access later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_type_group *queue_group;
	char tmp[16];
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
				    q_ops->dbg_read);

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

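/* Async queues must be brought up in dependency order: EQs first (CQs report
 * completion events into them), then CQs, and finally the send and receive
 * descriptor queues that complete onto the CQs. Teardown is the exact
 * reverse.
 */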
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
	    (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
	    (num_cqs != MLXSW_PCI_CQS_COUNT) ||
	    (num_eqs != MLXSW_PCI_EQS_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We had to poll the command interface until the queues were
	 * initialized; from now on, command completions arrive via events.
	 */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile)
{
	int i;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(
			mbox, profile->max_lag);
	}
	if (profile->used_max_port_per_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
			mbox, profile->max_port_per_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

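/* Firmware asks for a number of host memory pages via QUERY_FW; allocate one
 * DMA page per item and hand them over with the MAP_FA command.
 */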
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.num_pages = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
	}

	err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
	if (err)
		goto err_cmd_map_fa;

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;
	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(mlxsw_pci->msix_entry.vector,
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci_driver_name, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
}

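/* Spread transmitted packets across the SDQs by the local port number. */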
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

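/* Transmit path: map the linear part and up to MLXSW_PCI_WQE_SG_ENTRIES - 1
 * page fragments into one WQE (linearizing the skb if it has more), then
 * ring the producer doorbell. Completion is always requested so the CQ
 * handler can unmap and free the skb later.
 */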
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

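/* Execute a command through the command interface registers (CIR). Before
 * the event queues exist (cmd.nopoll == false) completion is detected by
 * polling the GO bit; afterwards the EVREQ bit is set and the EQ command
 * event wakes the waiter instead.
 */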
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0;
	dma_addr_t out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
					    in_mbox_size, PCI_DMA_TODEVICE);
		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
						   in_mapaddr))) {
			err = -EIO;
			goto err_in_mbox_map;
		}
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);

	if (out_mbox) {
		out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
					     out_mbox_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
						   out_mapaddr))) {
			err = -EIO;
			goto err_out_mbox_map;
		}
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands do not use the output param as an address to
		 * a mailbox but instead store output directly into registers.
		 * In that case, copy the registers into the mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	}

	if (out_mapaddr)
		pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
				 PCI_DMA_FROMDEVICE);

	/* fall through */

err_out_mbox_map:
	if (in_mapaddr)
		pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
				 PCI_DMA_TODEVICE);
err_in_mbox_map:
	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind = "pci",
	.init = mlxsw_pci_init,
	.fini = mlxsw_pci_fini,
	.skb_transmit = mlxsw_pci_skb_transmit,
	.cmd_exec = mlxsw_pci_cmd_exec,
};

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
	/* Current firmware does not let us know when the reset is done.
	 * So we just wait here for a constant time and hope for the best.
	 */
	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	return 0;
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_sw_reset(mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "Software reset failed\n");
		goto err_sw_reset;
	}

	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;

	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
						mlxsw_pci_dbg_root);
	if (!mlxsw_pci->dbg_dir) {
		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
		err = -ENOMEM;
		goto err_dbg_create_dir;
	}

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
err_dbg_create_dir:
	pci_disable_msix(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
	pci_disable_msix(mlxsw_pci->pdev);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

static struct pci_driver mlxsw_pci_driver = {
	.name = mlxsw_pci_driver_name,
	.id_table = mlxsw_pci_id_table,
	.probe = mlxsw_pci_probe,
	.remove = mlxsw_pci_remove,
};

static int __init mlxsw_pci_module_init(void)
{
	int err;

	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
	if (!mlxsw_pci_dbg_root)
		return -ENOMEM;
	err = pci_register_driver(&mlxsw_pci_driver);
	if (err)
		goto err_register_driver;
	return 0;

err_register_driver:
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
	return err;
}

static void __exit mlxsw_pci_module_exit(void)
{
	pci_unregister_driver(&mlxsw_pci_driver);
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);