/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"

static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

static const struct pci_device_id mlxsw_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, }
};

static struct dentry *mlxsw_pci_dbg_root;

static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
{
	switch (id->device) {
	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
		return MLXSW_DEVICE_KIND_SWITCHX2;
	default:
		BUG();
	}
}

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

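/* The device uses four types of queues: send descriptor queues (SDQ)
 * and receive descriptor queues (RDQ) carry packet WQEs, completion
 * queues (CQ) report finished WQEs, and event queues (EQ) deliver
 * command completions and CQ activity notifications.
 */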
enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
{
	switch (q_type) {
	case MLXSW_PCI_QUEUE_TYPE_SDQ:
		return "sdq";
	case MLXSW_PCI_QUEUE_TYPE_RDQ:
		return "rdq";
	case MLXSW_PCI_QUEUE_TYPE_CQ:
		return "cq";
	case MLXSW_PCI_QUEUE_TYPE_EQ:
		return "eq";
	}
	BUG();
}

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct msix_entry msix_entry;
	struct mlxsw_core *core;
	struct {
		u16 num_pages;
		struct mlxsw_pci_mem_item *items;
	} fw_area;
	struct {
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	struct dentry *dbg_dir;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

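/* Queue counters are free-running and only their low bits index the
 * ring, so with a power-of-two element count:
 *
 *	index  = counter & (count - 1)	ring slot for the counter
 *	parity = counter & count	flips on every ring wrap
 *
 * An element still belongs to the hardware while its owner bit
 * differs from the wrap parity of the consumer counter.
 */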
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
					 u32 (*get_elem_owner_func)(char *))
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = get_elem_owner_func(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

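/* Each queue has a 32-bit doorbell register in the doorbell page of
 * BAR0, addressed by the queue-type offset and the queue number.
 * Producer doorbells publish the producer counter to the hardware,
 * consumer doorbells return consumed elements, and the separate "arm"
 * doorbells of CQs and EQs re-enable event generation for the queue.
 */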
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

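/* Queue initialization follows a common pattern: fill the mailbox
 * with the queue attributes and the bus addresses of the ring pages,
 * then hand the ring over to the firmware with the matching SW2HW_*
 * command.
 */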
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

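/* A WQE carries up to MLXSW_PCI_WQE_SG_ENTRIES scatter/gather
 * entries. Attaching a frag means DMA-mapping its data and storing
 * the resulting bus address and byte count in the chosen entry;
 * a zero byte count marks an entry as unused.
 */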
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		if (net_ratelimit())
			dev_err(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number as this RDQ, with a base above
	 * MLXSW_PCI_SDQS_COUNT as the lower CQs are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

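/* All CQEs start out with the owner bit set, i.e. owned by the
 * hardware for its first pass over the ring. The CQ is bound to the
 * completion EQ, so new CQEs are announced via MLXSW_PCI_EQ_COMP_NUM.
 */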
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_cq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
			   q->u.cq.comp_rdq_count, q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	/* We do not support LAG for now */
	if (mlxsw_pci_cqe_lag_get(cqe))
		goto drop;

	rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

put_new_skb:
	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err && net_ratelimit())
		dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return;

drop:
	dev_kfree_skb_any(skb);
	goto put_new_skb;
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
}

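/* A tasklet run consumes at most half of the ring ("credits"), so a
 * busy queue cannot monopolize the CPU; the consumer and arm
 * doorbells are rung once at the end for the whole batch.
 */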
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, cqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, cqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
}

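/* The EQ tasklet demultiplexes events: command completions wake up
 * the waiter in the command interface, while completion events are
 * collected in a CQN bitmap so that each active CQ gets its tasklet
 * scheduled once per run.
 */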
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

		switch (event_type) {
		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	int (*dbg_read)(struct seq_file *s, void *data);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.dbg_read	= mlxsw_pci_sdq_dbg_read,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.dbg_read	= mlxsw_pci_rdq_dbg_read,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.dbg_read	= mlxsw_pci_cq_dbg_read,
	.elem_count	= MLXSW_PCI_CQE_COUNT,
	.elem_size	= MLXSW_PCI_CQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.dbg_read	= mlxsw_pci_eq_dbg_read,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

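/* Common queue setup: allocate one DMA-coherent area of
 * MLXSW_PCI_AQ_SIZE bytes for the ring, build the elem_info array on
 * top of it and let the type-specific init pass the pages to the
 * hardware through a zeroed mailbox.
 */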
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	spin_lock_init(&q->lock);
	q->num = q_num;
	q->count = q_ops->elem_count;
	q->elem_size = q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize the elem_info array with pointers into the
	 * DMA-mapped element memory for easy access later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_type_group *queue_group;
	char tmp[16];
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
				    q_ops->dbg_read);

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

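/* The driver is built around fixed queue counts and ring sizes, so
 * the QUERY_AQ_CAP output is only validated against the MLXSW_PCI_*
 * constants rather than used to size anything dynamically.
 */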
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
	    (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
	    (num_cqs != MLXSW_PCI_CQS_COUNT) ||
	    (num_eqs != MLXSW_PCI_EQS_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We had to poll the command interface until now; with the EQs
	 * initialized, command completions are delivered as events.
	 */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile)
{
	int i;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(
			mbox, profile->max_lag);
	}
	if (profile->used_max_port_per_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
			mbox, profile->max_port_per_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

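/* The firmware area is a set of pages requested by the firmware at
 * QUERY_FW time; each page is allocated DMA-coherent, described by
 * one MAP_FA record and stays mapped until UNMAP_FA on fini.
 */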
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.num_pages = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
	}

	err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
	if (err)
		goto err_cmd_map_fa;

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;
	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(mlxsw_pci->msix_entry.vector,
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci_driver_name, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

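/* Transmit path: a packet with more frags than free WQE
 * scatter/gather entries is linearized first; the linear part and
 * each frag are then DMA-mapped into a single WQE and the producer
 * doorbell is rung to hand the element over to the hardware.
 */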
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

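/* Command interface execution has two completion modes: before the
 * EQs are up (nopoll is false) the GO bit in CIR_CTRL is polled,
 * afterwards the command completion event wakes the waiter. Status
 * and output parameters are then taken from the CIR registers or
 * from the event, respectively.
 */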
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0;
	dma_addr_t out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
					    in_mbox_size, PCI_DMA_TODEVICE);
		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
						   in_mapaddr))) {
			err = -EIO;
			goto err_in_mbox_map;
		}
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);

	if (out_mbox) {
		out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
					     out_mbox_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
						   out_mapaddr))) {
			err = -EIO;
			goto err_out_mbox_map;
		}
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands do not use the output param as an address
		 * to a mailbox but instead store their output directly in
		 * registers. In that case, copy the registers into the
		 * mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	}

	if (out_mapaddr)
		pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
				 PCI_DMA_FROMDEVICE);

	/* fall through */

err_out_mbox_map:
	if (in_mapaddr)
		pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
				 PCI_DMA_TODEVICE);
err_in_mbox_map:
	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
};

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
	/* Current firmware does not let us know when the reset is done.
	 * So we just wait here for a constant time and hope for the best.
	 */
	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	return 0;
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_sw_reset(mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "Software reset failed\n");
		goto err_sw_reset;
	}

	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;

	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
						mlxsw_pci_dbg_root);
	if (!mlxsw_pci->dbg_dir) {
		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
		err = -ENOMEM;
		goto err_dbg_create_dir;
	}

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
err_dbg_create_dir:
	pci_disable_msix(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
	pci_disable_msix(mlxsw_pci->pdev);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

static struct pci_driver mlxsw_pci_driver = {
	.name		= mlxsw_pci_driver_name,
	.id_table	= mlxsw_pci_id_table,
	.probe		= mlxsw_pci_probe,
	.remove		= mlxsw_pci_remove,
};

static int __init mlxsw_pci_module_init(void)
{
	int err;

	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
	if (!mlxsw_pci_dbg_root)
		return -ENOMEM;
	err = pci_register_driver(&mlxsw_pci_driver);
	if (err)
		goto err_register_driver;
	return 0;

err_register_driver:
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
	return err;
}

static void __exit mlxsw_pci_module_exit(void)
{
	pci_unregister_driver(&mlxsw_pci_driver);
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);