/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

static int nicvf_enable_msix(struct nicvf *nic);
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev);
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx);

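/* Update the driver's histogram of received frame sizes */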
static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */

static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

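/* Write a message to the PF mailbox and sleep-wait until the PF ACKs it,
 * NACKs it (-EINVAL) or the timeout expires (-EBUSY).
 */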
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -EINVAL;
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ack to mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	int timeout = 5000, sleep = 20;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;

	nic->pf_ready_to_rcv_msg = false;

	nicvf_write_to_mbx(nic, &mbx);

	while (!nic->pf_ready_to_rcv_msg) {
		msleep(sleep);
		if (nic->pf_ready_to_rcv_msg)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't respond to READY msg\n");
			return 0;
		}
	}
	return 1;
}

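/* Mailbox interrupt handler: read the message written by the PF from the
 * mailbox registers and act on it (config, ACK/NACK, stats, link change).
 */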
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_ready_to_rcv_msg = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr);
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		nic->bgx_stats_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_wake_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

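/* Ask the PF to program this VF's current MAC address into the hardware */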
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

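/* Program the RSS indirection table via the PF.  The table is sent over the
 * mailbox in chunks of at most RSS_IND_TBL_LEN_PER_MBX_MSG entries.
 */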
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

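/* Query the RSS table size from the PF and, when more than one RQ is in use
 * and no CPI algorithm is forced, set up the hash key, hash config and a
 * default indirection table spread across all receive queues.
 */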
static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADULL;
	rss->key[1] = 0xFEED0BADFEED0BADULL;
	rss->key[2] = 0xFEED0BADFEED0BADULL;
	rss->key[3] = 0xFEED0BADFEED0BADULL;
	rss->key[4] = 0xFEED0BADFEED0BADULL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->qs->rq_cnt);
	nicvf_config_rss(nic);
	return 1;
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}

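/* Handle a CQE_TYPE_SEND completion: release the SQ descriptors used by the
 * packet and free the transmitted skb.
 */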
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx, int cqe_type)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	/* For TSO offloaded packets only one head SKB needs to be freed */
	if (skb) {
		prefetch(skb);
		dev_consume_skb_any(skb);
	}
}

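/* Handle a CQE_TYPE_RX completion: build an skb from the receive buffers,
 * set its checksum status and hand it to the stack (via GRO when possible).
 */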
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cmp_queue *cq,
				  struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	skb_record_rx_queue(skb, cqe_rx->rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

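/* Process completions on the given CQ.  RX CQEs are processed up to the NAPI
 * budget while TX completions are always processed; the doorbell is rung
 * afterwards so the hardware can reuse the consumed CQEs.
 */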
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
		   __func__, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
			   cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq,
					      cq_desc, CQE_TYPE_RX);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND);
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
		   __func__, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	spin_unlock_bh(&cq->lock);
	return work_done;
}

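/* NAPI poll handler: process completions, wake a stopped TX queue if needed
 * and re-enable the CQ interrupt once the budget is no longer exhausted.
 */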
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;
	struct netdev_queue *txq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	txq = netdev_get_tx_queue(netdev, cq->cq_idx);
	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

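/* Main interrupt handler for CQ, RBDR and Qset error interrupts: disable the
 * interrupting source and defer the work to NAPI or the softirq tasklets.
 */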
static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
{
	u64 qidx, intr, clear_intr = 0;
	u64 cq_intr, rbdr_intr, qs_err_intr;
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, intr);

	qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
	if (qs_err_intr) {
		/* Disable Qset err interrupt and schedule softirq */
		nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
		tasklet_hi_schedule(&nic->qs_err_task);
		clear_intr |= qs_err_intr;
	}

	/* Disable interrupts and start polling */
	cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (!(cq_intr & (1 << qidx)))
			continue;
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
			continue;

		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);

		cq_poll = nic->napi[qidx];
		/* Schedule NAPI */
		if (cq_poll)
			napi_schedule(&cq_poll->napi);
	}

	/* Handle RBDR interrupts */
	rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
	if (rbdr_intr) {
		/* Disable RBDR interrupt and schedule softirq */
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
			if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
				continue;
			nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
			tasklet_hi_schedule(&nic->rbdr_task);
			clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
		}
	}

	/* Clear interrupts */
	nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
	return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, free, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
			nic->vf_id, irq);

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
			nic->vf_id, irq - NICVF_INTR_ID_SQ);

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
			nic->vf_id, irq - NICVF_INTR_ID_RBDR);

	/* Register all interrupts except mailbox */
	for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			break;
		nic->irq_allocated[irq] = true;
	}

	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			break;
		nic->irq_allocated[irq] = true;
	}

	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
		"NICVF%d Qset error", nic->vf_id);
	if (!ret) {
		vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
		irq = NICVF_INTR_ID_QS_ERR;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (!ret)
			nic->irq_allocated[irq] = true;
	}

	if (ret) {
		netdev_err(nic->netdev, "Request irq failed\n");
		for (free = 0; free < irq; free++)
			free_irq(nic->msix_entries[free].vector, nic);
		return ret;
	}

	return 0;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

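/* Transmit path: append the skb to the send queue that matches its queue
 * mapping; stop the TX queue and report busy when the SQ is full.
 */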
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.tx_busy++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
		kfree(cq_poll);
	}

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	return 0;
}

int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	nicvf_config_cpi(nic);

	/* Configure receive side scaling */
	nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
		kfree(cq_poll);
		nic->napi[qidx] = NULL;
	}
	return err;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled)
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;

	return 0;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

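/* Fetch BGX (LMAC) RX and TX counters from the PF, one counter per mailbox
 * request, waiting briefly for each reply to be acked.
 */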
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};
	int timeout;

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		nic->bgx_stats_acked = 0;
		mbx.bgx_stats.idx = stat;
		nicvf_send_msg_to_pf(nic, &mbx);
		timeout = 0;
		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
			msleep(2);
			timeout++;
		}
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		nic->bgx_stats_acked = 0;
		mbx.bgx_stats.idx = stat;
		nicvf_send_msg_to_pf(nic, &mbx);
		timeout = 0;
		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
			msleep(2);
			timeout++;
		}
		stat++;
	}
}

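/* Refresh hardware and driver statistics by reading the VNIC RX/TX statistics
 * registers and the per-RQ/SQ counters.
 */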
void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
				  stats->rx_bcast_frames_ok +
				  stats->rx_mcast_frames_ok;
	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
					    struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes_ok;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	nic->netdev->trans_start = jiffies;
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	struct queue_set *qs;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
				    MAX_RCV_QUEUES_PER_QS,
				    MAX_SND_QUEUES_PER_QS);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	qs = nic->qs;

	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			     NETIF_F_TSO | NETIF_F_GRO);
	netdev->hw_features = netdev->features;

	netdev->netdev_ops = &nicvf_netdev_ops;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic = netdev_priv(netdev);

	unregister_netdev(netdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);