1/*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/netdevice.h>
13#include <linux/etherdevice.h>
14#include <linux/ethtool.h>
15#include <linux/log2.h>
16#include <linux/prefetch.h>
17#include <linux/irq.h>
18
19#include "nic_reg.h"
20#include "nic.h"
21#include "nicvf_queues.h"
22#include "thunder_bgx.h"
23
24#define DRV_NAME "thunder-nicvf"
25#define DRV_VERSION "1.0"
26
27/* Supported devices */
28static const struct pci_device_id nicvf_id_table[] = {
29 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
30 PCI_DEVICE_ID_THUNDER_NIC_VF,
31 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
32 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
33 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
34 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
35 { 0, } /* end of table */
36};
37
38MODULE_AUTHOR("Sunil Goutham");
39MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
40MODULE_LICENSE("GPL v2");
41MODULE_VERSION(DRV_VERSION);
42MODULE_DEVICE_TABLE(pci, nicvf_id_table);
43
44static int debug = 0x00;
45module_param(debug, int, 0644);
46MODULE_PARM_DESC(debug, "Debug message level bitmap");
47
48static int cpi_alg = CPI_ALG_NONE;
49module_param(cpi_alg, int, S_IRUGO);
50MODULE_PARM_DESC(cpi_alg,
51 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
52
53static int nicvf_enable_msix(struct nicvf *nic);
54static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev);
55static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx);
56
57static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
58 struct sk_buff *skb)
59{
60 if (skb->len <= 64)
61 nic->drv_stats.rx_frames_64++;
62 else if (skb->len <= 127)
63 nic->drv_stats.rx_frames_127++;
64 else if (skb->len <= 255)
65 nic->drv_stats.rx_frames_255++;
66 else if (skb->len <= 511)
67 nic->drv_stats.rx_frames_511++;
68 else if (skb->len <= 1023)
69 nic->drv_stats.rx_frames_1023++;
70 else if (skb->len <= 1518)
71 nic->drv_stats.rx_frames_1518++;
72 else
73 nic->drv_stats.rx_frames_jumbo++;
74}
75
76/* The Cavium ThunderX network controller can *only* be found in SoCs
77 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
78 * registers on this platform are implicitly strongly ordered with respect
79 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
80 * with no memory barriers in this driver. The readq()/writeq() functions add
81 * explicit ordering operations which in this case are redundant, and only
82 * add overhead.
83 */
84
85/* Register read/write APIs */
86void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
87{
88 writeq_relaxed(val, nic->reg_base + offset);
89}
90
91u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
92{
93 return readq_relaxed(nic->reg_base + offset);
94}
95
96void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
97 u64 qidx, u64 val)
98{
99 void __iomem *addr = nic->reg_base + offset;
100
101 writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
102}
103
104u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
105{
106 void __iomem *addr = nic->reg_base + offset;
107
108 return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
109}
110
111/* VF -> PF mailbox communication */
112
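/* Copy the 16-byte message into the two 64-bit VF->PF mailbox registers,
 * then sleep in 10 ms steps until the mailbox interrupt handler reports an
 * ACK or NACK from the PF.  Returns 0 on ACK, -EINVAL on NACK and -EBUSY
 * if no reply arrives within NIC_MBOX_MSG_TIMEOUT msecs.
 */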
113int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
114{
115 int timeout = NIC_MBOX_MSG_TIMEOUT;
116 int sleep = 10;
117 u64 *msg = (u64 *)mbx;
118
119 nic->pf_acked = false;
120 nic->pf_nacked = false;
121
122 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
123 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
124
125 /* Wait for the message just sent to be acked, timeout 2sec */
126 while (!nic->pf_acked) {
127 if (nic->pf_nacked)
128 return -EINVAL;
129 msleep(sleep);
130 if (nic->pf_acked)
131 break;
132 timeout -= sleep;
133 if (!timeout) {
134 netdev_err(nic->netdev,
135 "PF didn't ack to mbox msg %d from VF%d\n",
136 (mbx->msg.msg & 0xFF), nic->vf_id);
137 return -EBUSY;
138 }
139 }
140 return 0;
141}
142
143/* Checks if VF is able to communicate with PF
144 * and also gets the VNIC number this VF is associated to.
145 */
146static int nicvf_check_pf_ready(struct nicvf *nic)
147{
148 int timeout = 5000, sleep = 20;
149
150 nic->pf_ready_to_rcv_msg = false;
151
152 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0,
153 le64_to_cpu(NIC_MBOX_MSG_READY));
154 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, 1ULL);
155
156 while (!nic->pf_ready_to_rcv_msg) {
157 msleep(sleep);
158 if (nic->pf_ready_to_rcv_msg)
159 break;
160 timeout -= sleep;
161 if (!timeout) {
162 netdev_err(nic->netdev,
163 "PF didn't respond to READY msg\n");
164 return 0;
165 }
166 }
167 return 1;
168}
169
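/* Called from the misc (mailbox) interrupt handler: read the PF->VF mailbox
 * registers into a nic_mbx union and act on the message type (READY
 * configuration, ACK/NACK, RSS size, BGX stats, link state change).
 */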
170static void nicvf_handle_mbx_intr(struct nicvf *nic)
171{
172 union nic_mbx mbx = {};
173 u64 *mbx_data;
174 u64 mbx_addr;
175 int i;
176
177 mbx_addr = NIC_VF_PF_MAILBOX_0_1;
178 mbx_data = (u64 *)&mbx;
179
180 for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
181 *mbx_data = nicvf_reg_read(nic, mbx_addr);
182 mbx_data++;
183 mbx_addr += sizeof(u64);
184 }
185
186 netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
187 switch (mbx.msg.msg) {
188 case NIC_MBOX_MSG_READY:
189 nic->pf_ready_to_rcv_msg = true;
190 nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
191 nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
192 nic->node = mbx.nic_cfg.node_id;
193 ether_addr_copy(nic->netdev->dev_addr,
194 (u8 *)&mbx.nic_cfg.mac_addr);
195 nic->link_up = false;
196 nic->duplex = 0;
197 nic->speed = 0;
198 break;
199 case NIC_MBOX_MSG_ACK:
200 nic->pf_acked = true;
201 break;
202 case NIC_MBOX_MSG_NACK:
203 nic->pf_nacked = true;
204 break;
205 case NIC_MBOX_MSG_RSS_SIZE:
206 nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
207 nic->pf_acked = true;
208 break;
209 case NIC_MBOX_MSG_BGX_STATS:
210 nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
211 nic->pf_acked = true;
212 nic->bgx_stats_acked = true;
213 break;
214 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
215 nic->pf_acked = true;
216 nic->link_up = mbx.link_status.link_up;
217 nic->duplex = mbx.link_status.duplex;
218 nic->speed = mbx.link_status.speed;
219 if (nic->link_up) {
220 netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
221 nic->netdev->name, nic->speed,
222 nic->duplex == DUPLEX_FULL ?
223 "Full duplex" : "Half duplex");
224 netif_carrier_on(nic->netdev);
225 netif_tx_wake_all_queues(nic->netdev);
226 } else {
227 netdev_info(nic->netdev, "%s: Link is Down\n",
228 nic->netdev->name);
229 netif_carrier_off(nic->netdev);
230 netif_tx_stop_all_queues(nic->netdev);
231 }
232 break;
233 default:
234 netdev_err(nic->netdev,
235 "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
236 break;
237 }
238 nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
239}
240
241static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
242{
243 union nic_mbx mbx = {};
244 int i;
245
246 mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
247 mbx.mac.vf_id = nic->vf_id;
248 for (i = 0; i < ETH_ALEN; i++)
249 mbx.mac.addr = (mbx.mac.addr << 8) |
250 netdev->dev_addr[i];
251
252 return nicvf_send_msg_to_pf(nic, &mbx);
253}
254
255void nicvf_config_cpi(struct nicvf *nic)
256{
257 union nic_mbx mbx = {};
258
259 mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
260 mbx.cpi_cfg.vf_id = nic->vf_id;
261 mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
262 mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
263
264 nicvf_send_msg_to_pf(nic, &mbx);
265}
266
267void nicvf_get_rss_size(struct nicvf *nic)
268{
269 union nic_mbx mbx = {};
270
271 mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
272 mbx.rss_size.vf_id = nic->vf_id;
273 nicvf_send_msg_to_pf(nic, &mbx);
274}
275
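/* Send the RSS indirection table to the PF.  The table may not fit in a
 * single mailbox message, so it is sent in chunks of at most
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries; continuation chunks use the
 * NIC_MBOX_MSG_RSS_CFG_CONT message type.
 */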
276void nicvf_config_rss(struct nicvf *nic)
277{
278 union nic_mbx mbx = {};
279 struct nicvf_rss_info *rss = &nic->rss_info;
280 int ind_tbl_len = rss->rss_size;
281 int i, nextq = 0;
282
283 mbx.rss_cfg.vf_id = nic->vf_id;
284 mbx.rss_cfg.hash_bits = rss->hash_bits;
285 while (ind_tbl_len) {
286 mbx.rss_cfg.tbl_offset = nextq;
287 mbx.rss_cfg.tbl_len = min(ind_tbl_len,
288 RSS_IND_TBL_LEN_PER_MBX_MSG);
289 mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
290 NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
291
292 for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
293 mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
294
295 nicvf_send_msg_to_pf(nic, &mbx);
296
297 ind_tbl_len -= mbx.rss_cfg.tbl_len;
298 }
299}
300
301void nicvf_set_rss_key(struct nicvf *nic)
302{
303 struct nicvf_rss_info *rss = &nic->rss_info;
304 u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
305 int idx;
306
307 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
308 nicvf_reg_write(nic, key_addr, rss->key[idx]);
309 key_addr += sizeof(u64);
310 }
311}
312
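/* Set up receive side scaling.  RSS is used only when more than one RQ is
 * configured and no CPI steering algorithm is selected; otherwise it is
 * left disabled.  Programs the hash key and hash-enable bits and fills the
 * indirection table with a default spread across the receive queues.
 */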
313static int nicvf_rss_init(struct nicvf *nic)
314{
315 struct nicvf_rss_info *rss = &nic->rss_info;
316 int idx;
317
318 nicvf_get_rss_size(nic);
319
320 if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
321 rss->enable = false;
322 rss->hash_bits = 0;
323 return 0;
324 }
325
326 rss->enable = true;
327
328 /* Using the HW reset value for now */
329 rss->key[0] = 0xFEED0BADFEED0BADULL;
330 rss->key[1] = 0xFEED0BADFEED0BADULL;
331 rss->key[2] = 0xFEED0BADFEED0BADULL;
332 rss->key[3] = 0xFEED0BADFEED0BADULL;
333 rss->key[4] = 0xFEED0BADFEED0BADULL;
334
335 nicvf_set_rss_key(nic);
336
337 rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
338 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
339
340 rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
341
342 for (idx = 0; idx < rss->rss_size; idx++)
343 rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
344 nic->qs->rq_cnt);
345 nicvf_config_rss(nic);
346 return 1;
347}
348
349int nicvf_set_real_num_queues(struct net_device *netdev,
350 int tx_queues, int rx_queues)
351{
352 int err = 0;
353
354 err = netif_set_real_num_tx_queues(netdev, tx_queues);
355 if (err) {
356 netdev_err(netdev,
357 "Failed to set no of Tx queues: %d\n", tx_queues);
358 return err;
359 }
360
361 err = netif_set_real_num_rx_queues(netdev, rx_queues);
362 if (err)
363 netdev_err(netdev,
364 "Failed to set no of Rx queues: %d\n", rx_queues);
365 return err;
366}
367
368static int nicvf_init_resources(struct nicvf *nic)
369{
370 int err;
371 u64 mbx_addr = NIC_VF_PF_MAILBOX_0_1;
372
373 /* Enable Qset */
374 nicvf_qset_config(nic, true);
375
376 /* Initialize queues and HW for data transfer */
377 err = nicvf_config_data_transfer(nic, true);
378 if (err) {
379 netdev_err(nic->netdev,
380 "Failed to alloc/config VF's QSet resources\n");
381 return err;
382 }
383
384 /* Send VF config done msg to PF */
385 nicvf_reg_write(nic, mbx_addr, le64_to_cpu(NIC_MBOX_MSG_CFG_DONE));
386 mbx_addr += (NIC_PF_VF_MAILBOX_SIZE - 1) * 8;
387 nicvf_reg_write(nic, mbx_addr, 1ULL);
388
389 return 0;
390}
391
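/* Transmit completion handler: release the SQ descriptors used by the
 * packet, check for transmit errors and free the head skb.
 */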
392static void nicvf_snd_pkt_handler(struct net_device *netdev,
393 struct cmp_queue *cq,
394 struct cqe_send_t *cqe_tx, int cqe_type)
395{
396 struct sk_buff *skb = NULL;
397 struct nicvf *nic = netdev_priv(netdev);
398 struct snd_queue *sq;
399 struct sq_hdr_subdesc *hdr;
400
401 sq = &nic->qs->sq[cqe_tx->sq_idx];
402
403 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
404 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
405 return;
406
407 netdev_dbg(nic->netdev,
408 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
409 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
410 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
411
412 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
413 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
414 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
415 /* For TSO offloaded packets only one head SKB needs to be freed */
416 if (skb) {
417 prefetch(skb);
418 dev_consume_skb_any(skb);
419 }
420}
421
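/* Receive completion handler: check the CQE for errors, build an skb from
 * the receive buffers, set the checksum state and hand the packet to GRO
 * or the regular receive path.
 */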
422static void nicvf_rcv_pkt_handler(struct net_device *netdev,
423 struct napi_struct *napi,
424 struct cmp_queue *cq,
425 struct cqe_rx_t *cqe_rx, int cqe_type)
426{
427 struct sk_buff *skb;
428 struct nicvf *nic = netdev_priv(netdev);
429 int err = 0;
430
431 /* Check for errors */
432 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
433 if (err && !cqe_rx->rb_cnt)
434 return;
435
436 skb = nicvf_get_rcv_skb(nic, cqe_rx);
437 if (!skb) {
438 netdev_dbg(nic->netdev, "Packet not received\n");
439 return;
440 }
441
442 if (netif_msg_pktdata(nic)) {
443 netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
444 skb, skb->len);
445 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
446 skb->data, skb->len, true);
447 }
448
449 nicvf_set_rx_frame_cnt(nic, skb);
450
451 skb_record_rx_queue(skb, cqe_rx->rq_idx);
452 if (netdev->hw_features & NETIF_F_RXCSUM) {
453 /* HW by default verifies TCP/UDP/SCTP checksums */
454 skb->ip_summed = CHECKSUM_UNNECESSARY;
455 } else {
456 skb_checksum_none_assert(skb);
457 }
458
459 skb->protocol = eth_type_trans(skb, netdev);
460
461 if (napi && (netdev->features & NETIF_F_GRO))
462 napi_gro_receive(napi, skb);
463 else
464 netif_receive_skb(skb);
465}
466
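/* Process a completion queue: read the number of valid CQEs and the queue
 * head from hardware, dispatch RX and SEND completions, then ring the CQ
 * doorbell so the processed entries can be reused.  Only RX completions
 * are charged against the NAPI budget.
 */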
467static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
468 struct napi_struct *napi, int budget)
469{
470 int processed_cqe, work_done = 0;
471 int cqe_count, cqe_head;
472 struct nicvf *nic = netdev_priv(netdev);
473 struct queue_set *qs = nic->qs;
474 struct cmp_queue *cq = &qs->cq[cq_idx];
475 struct cqe_rx_t *cq_desc;
476
477 spin_lock_bh(&cq->lock);
478loop:
479 processed_cqe = 0;
480 /* Get no of valid CQ entries to process */
481 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
482 cqe_count &= CQ_CQE_COUNT;
483 if (!cqe_count)
484 goto done;
485
486 /* Get head of the valid CQ entries */
487 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
488 cqe_head &= 0xFFFF;
489
490 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
491 __func__, cqe_count, cqe_head);
492 while (processed_cqe < cqe_count) {
493 /* Get the CQ descriptor */
494 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
495 cqe_head++;
496 cqe_head &= (cq->dmem.q_len - 1);
497 /* Initiate prefetch for next descriptor */
498 prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
499
500 if ((work_done >= budget) && napi &&
501 (cq_desc->cqe_type != CQE_TYPE_SEND)) {
502 break;
503 }
504
505 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
506 cq_desc->cqe_type);
507 switch (cq_desc->cqe_type) {
508 case CQE_TYPE_RX:
509 nicvf_rcv_pkt_handler(netdev, napi, cq,
510 cq_desc, CQE_TYPE_RX);
511 work_done++;
512 break;
513 case CQE_TYPE_SEND:
514 nicvf_snd_pkt_handler(netdev, cq,
515 (void *)cq_desc, CQE_TYPE_SEND);
516 break;
517 case CQE_TYPE_INVALID:
518 case CQE_TYPE_RX_SPLIT:
519 case CQE_TYPE_RX_TCP:
520 case CQE_TYPE_SEND_PTP:
521 /* Ignore for now */
522 break;
523 }
524 processed_cqe++;
525 }
526 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
527 __func__, processed_cqe, work_done, budget);
528
529 /* Ring doorbell to inform H/W to reuse processed CQEs */
530 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
531 cq_idx, processed_cqe);
532
533 if ((work_done < budget) && napi)
534 goto loop;
535
536done:
537 spin_unlock_bh(&cq->lock);
538 return work_done;
539}
540
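/* NAPI poll handler: service the corresponding CQ, wake the paired Tx
 * queue if it was stopped and, when the budget is not exhausted, complete
 * NAPI and re-arm the CQ interrupt after writing back the head pointer.
 */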
541static int nicvf_poll(struct napi_struct *napi, int budget)
542{
543 u64 cq_head;
544 int work_done = 0;
545 struct net_device *netdev = napi->dev;
546 struct nicvf *nic = netdev_priv(netdev);
547 struct nicvf_cq_poll *cq;
548 struct netdev_queue *txq;
549
550 cq = container_of(napi, struct nicvf_cq_poll, napi);
551 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
552
553 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
554 if (netif_tx_queue_stopped(txq))
555 netif_tx_wake_queue(txq);
556
557 if (work_done < budget) {
558 /* Slow packet rate, exit polling */
559 napi_complete(napi);
560 /* Re-enable interrupts */
561 cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
562 cq->cq_idx);
563 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
564 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
565 cq->cq_idx, cq_head);
566 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
567 }
568 return work_done;
569}
570
571/* Qset error interrupt handler
572 *
573 * As of now only CQ errors are handled
574 */
575void nicvf_handle_qs_err(unsigned long data)
576{
577 struct nicvf *nic = (struct nicvf *)data;
578 struct queue_set *qs = nic->qs;
579 int qidx;
580 u64 status;
581
582 netif_tx_disable(nic->netdev);
583
584 /* Check if it is CQ err */
585 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
586 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
587 qidx);
588 if (!(status & CQ_ERR_MASK))
589 continue;
590 /* Process already queued CQEs and reconfig CQ */
591 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
592 nicvf_sq_disable(nic, qidx);
593 nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
594 nicvf_cmp_queue_config(nic, qs, qidx, true);
595 nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
596 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
597
598 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
599 }
600
601 netif_tx_start_all_queues(nic->netdev);
602 /* Re-enable Qset error interrupt */
603 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
604}
605
606static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
607{
608 struct nicvf *nic = (struct nicvf *)nicvf_irq;
609 u64 intr;
610
611 intr = nicvf_reg_read(nic, NIC_VF_INT);
612 /* Check for spurious interrupt */
613 if (!(intr & NICVF_INTR_MBOX_MASK))
614 return IRQ_HANDLED;
615
616 nicvf_handle_mbx_intr(nic);
617
618 return IRQ_HANDLED;
619}
620
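/* Common handler for the CQ, RBDR and Qset error MSI-X vectors: disable
 * the reporting interrupt, defer the work to NAPI or the matching tasklet
 * and clear the serviced bits in NIC_VF_INT.
 */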
621static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
622{
623 u64 qidx, intr, clear_intr = 0;
624 u64 cq_intr, rbdr_intr, qs_err_intr;
625 struct nicvf *nic = (struct nicvf *)nicvf_irq;
626 struct queue_set *qs = nic->qs;
627 struct nicvf_cq_poll *cq_poll = NULL;
628
629 intr = nicvf_reg_read(nic, NIC_VF_INT);
630 if (netif_msg_intr(nic))
631 netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
632 nic->netdev->name, intr);
633
634 qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
635 if (qs_err_intr) {
636 /* Disable Qset err interrupt and schedule softirq */
637 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
638 tasklet_hi_schedule(&nic->qs_err_task);
639 clear_intr |= qs_err_intr;
640 }
641
642 /* Disable interrupts and start polling */
643 cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
644 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
645 if (!(cq_intr & (1 << qidx)))
646 continue;
647 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
648 continue;
649
650 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
651 clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
652
653 cq_poll = nic->napi[qidx];
654 /* Schedule NAPI */
655 if (cq_poll)
656 napi_schedule(&cq_poll->napi);
657 }
658
659 /* Handle RBDR interrupts */
660 rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
661 if (rbdr_intr) {
662 /* Disable RBDR interrupt and schedule softirq */
663 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
664 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
665 continue;
666 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
667 tasklet_hi_schedule(&nic->rbdr_task);
668 clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
669 }
670 }
671
672 /* Clear interrupts */
673 nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
674 return IRQ_HANDLED;
675}
676
677static int nicvf_enable_msix(struct nicvf *nic)
678{
679 int ret, vec;
680
681 nic->num_vec = NIC_VF_MSIX_VECTORS;
682
683 for (vec = 0; vec < nic->num_vec; vec++)
684 nic->msix_entries[vec].entry = vec;
685
686 ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
687 if (ret) {
688 netdev_err(nic->netdev,
689 "Req for #%d msix vectors failed\n", nic->num_vec);
690 return 0;
691 }
692 nic->msix_enabled = 1;
693 return 1;
694}
695
696static void nicvf_disable_msix(struct nicvf *nic)
697{
698 if (nic->msix_enabled) {
699 pci_disable_msix(nic->pdev);
700 nic->msix_enabled = 0;
701 nic->num_vec = 0;
702 }
703}
704
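/* Name and request all MSI-X vectors except the mailbox one, which is
 * registered earlier by nicvf_register_misc_interrupt().  On failure the
 * vectors requested so far are freed again.
 */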
705static int nicvf_register_interrupts(struct nicvf *nic)
706{
707 int irq, free, ret = 0;
708 int vector;
709
710 for_each_cq_irq(irq)
711 sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
712 nic->vf_id, irq);
713
714 for_each_sq_irq(irq)
715 sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
716 nic->vf_id, irq - NICVF_INTR_ID_SQ);
717
718 for_each_rbdr_irq(irq)
719 sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
720 nic->vf_id, irq - NICVF_INTR_ID_RBDR);
721
722 /* Register all interrupts except mailbox */
723 for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
724 vector = nic->msix_entries[irq].vector;
725 ret = request_irq(vector, nicvf_intr_handler,
726 0, nic->irq_name[irq], nic);
727 if (ret)
728 break;
729 nic->irq_allocated[irq] = true;
730 }
731
732 for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
733 vector = nic->msix_entries[irq].vector;
734 ret = request_irq(vector, nicvf_intr_handler,
735 0, nic->irq_name[irq], nic);
736 if (ret)
737 break;
738 nic->irq_allocated[irq] = true;
739 }
740
741 sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
742 "NICVF%d Qset error", nic->vf_id);
743 if (!ret) {
744 vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
745 irq = NICVF_INTR_ID_QS_ERR;
746 ret = request_irq(vector, nicvf_intr_handler,
747 0, nic->irq_name[irq], nic);
748 if (!ret)
749 nic->irq_allocated[irq] = true;
750 }
751
752 if (ret) {
753 netdev_err(nic->netdev, "Request irq failed\n");
754 for (free = 0; free < irq; free++)
755 free_irq(nic->msix_entries[free].vector, nic);
756 return ret;
757 }
758
759 return 0;
760}
761
762static void nicvf_unregister_interrupts(struct nicvf *nic)
763{
764 int irq;
765
766 /* Free registered interrupts */
767 for (irq = 0; irq < nic->num_vec; irq++) {
768 if (nic->irq_allocated[irq])
769 free_irq(nic->msix_entries[irq].vector, nic);
770 nic->irq_allocated[irq] = false;
771 }
772
773 /* Disable MSI-X */
774 nicvf_disable_msix(nic);
775}
776
777/* Initialize MSIX vectors and register MISC interrupt.
778 * Send READY message to PF to check if it is alive
779 */
780static int nicvf_register_misc_interrupt(struct nicvf *nic)
781{
782 int ret = 0;
783 int irq = NICVF_INTR_ID_MISC;
784
785 /* Return if mailbox interrupt is already registered */
786 if (nic->msix_enabled)
787 return 0;
788
789 /* Enable MSI-X */
790 if (!nicvf_enable_msix(nic))
791 return 1;
792
793 sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
794 /* Register Misc interrupt */
795 ret = request_irq(nic->msix_entries[irq].vector,
796 nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
797
798 if (ret)
799 return ret;
800 nic->irq_allocated[irq] = true;
801
802 /* Enable mailbox interrupt */
803 nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
804
805 /* Check if VF is able to communicate with PF */
806 if (!nicvf_check_pf_ready(nic)) {
807 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
808 nicvf_unregister_interrupts(nic);
809 return 1;
810 }
811
812 return 0;
813}
814
815static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
816{
817 struct nicvf *nic = netdev_priv(netdev);
818 int qid = skb_get_queue_mapping(skb);
819 struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
820
821 /* Check for minimum packet length */
822 if (skb->len <= ETH_HLEN) {
823 dev_kfree_skb(skb);
824 return NETDEV_TX_OK;
825 }
826
827 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
828 netif_tx_stop_queue(txq);
829 nic->drv_stats.tx_busy++;
830 if (netif_msg_tx_err(nic))
831 netdev_warn(netdev,
832 "%s: Transmit ring full, stopping SQ%d\n",
833 netdev->name, qid);
834
835 return NETDEV_TX_BUSY;
836 }
837
838 return NETDEV_TX_OK;
839}
840
841int nicvf_stop(struct net_device *netdev)
842{
843 int irq, qidx;
844 struct nicvf *nic = netdev_priv(netdev);
845 struct queue_set *qs = nic->qs;
846 struct nicvf_cq_poll *cq_poll = NULL;
847 union nic_mbx mbx = {};
848
849 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
850 nicvf_send_msg_to_pf(nic, &mbx);
851
852 netif_carrier_off(netdev);
853 netif_tx_disable(netdev);
854
855 /* Disable RBDR & QS error interrupts */
856 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
857 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
858 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
859 }
860 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
861 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
862
863 /* Wait for pending IRQ handlers to finish */
864 for (irq = 0; irq < nic->num_vec; irq++)
865 synchronize_irq(nic->msix_entries[irq].vector);
866
867 tasklet_kill(&nic->rbdr_task);
868 tasklet_kill(&nic->qs_err_task);
869 if (nic->rb_work_scheduled)
870 cancel_delayed_work_sync(&nic->rbdr_work);
871
872 for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
873 cq_poll = nic->napi[qidx];
874 if (!cq_poll)
875 continue;
876 nic->napi[qidx] = NULL;
877 napi_synchronize(&cq_poll->napi);
878 /* The CQ interrupt is re-enabled when NAPI completes,
879 * so disable it again here
880 */
881 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
882 nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
883 napi_disable(&cq_poll->napi);
884 netif_napi_del(&cq_poll->napi);
885 kfree(cq_poll);
886 }
887
888 /* Free resources */
889 nicvf_config_data_transfer(nic, false);
890
891 /* Disable HW Qset */
892 nicvf_qset_config(nic, false);
893
894 /* disable mailbox interrupt */
895 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
896
897 nicvf_unregister_interrupts(nic);
898
899 return 0;
900}
901
902int nicvf_open(struct net_device *netdev)
903{
904 int err, qidx;
905 struct nicvf *nic = netdev_priv(netdev);
906 struct queue_set *qs = nic->qs;
907 struct nicvf_cq_poll *cq_poll = NULL;
908
909 nic->mtu = netdev->mtu;
910
911 netif_carrier_off(netdev);
912
913 err = nicvf_register_misc_interrupt(nic);
914 if (err)
915 return err;
916
917 /* Register NAPI handler for processing CQEs */
918 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
919 cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
920 if (!cq_poll) {
921 err = -ENOMEM;
922 goto napi_del;
923 }
924 cq_poll->cq_idx = qidx;
925 netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
926 NAPI_POLL_WEIGHT);
927 napi_enable(&cq_poll->napi);
928 nic->napi[qidx] = cq_poll;
929 }
930
931 /* Check if we got a MAC address from PF, else generate a random MAC */
932 if (is_zero_ether_addr(netdev->dev_addr)) {
933 eth_hw_addr_random(netdev);
934 nicvf_hw_set_mac_addr(nic, netdev);
935 }
936
937 /* Init tasklet for handling Qset err interrupt */
938 tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
939 (unsigned long)nic);
940
941 /* Init RBDR tasklet which will refill RBDR */
942 tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
943 (unsigned long)nic);
944 INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
945
946 /* Configure CPI algorithm */
947 nic->cpi_alg = cpi_alg;
948 nicvf_config_cpi(nic);
949
950 /* Configure receive side scaling */
951 nicvf_rss_init(nic);
952
953 err = nicvf_register_interrupts(nic);
954 if (err)
955 goto cleanup;
956
957 /* Initialize the queues */
958 err = nicvf_init_resources(nic);
959 if (err)
960 goto cleanup;
961
962 /* Make sure queue initialization is written */
963 wmb();
964
965 nicvf_reg_write(nic, NIC_VF_INT, -1);
966 /* Enable Qset err interrupt */
967 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
968
969 /* Enable completion queue interrupt */
970 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
971 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
972
973 /* Enable RBDR threshold interrupt */
974 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
975 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
976
977 netif_carrier_on(netdev);
978 netif_tx_start_all_queues(netdev);
979
980 return 0;
981cleanup:
982 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
983 nicvf_unregister_interrupts(nic);
984napi_del:
985 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
986 cq_poll = nic->napi[qidx];
987 if (!cq_poll)
988 continue;
989 napi_disable(&cq_poll->napi);
990 netif_napi_del(&cq_poll->napi);
991 kfree(cq_poll);
992 nic->napi[qidx] = NULL;
993 }
994 return err;
995}
996
997static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
998{
999 union nic_mbx mbx = {};
1000
1001 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1002 mbx.frs.max_frs = mtu;
1003 mbx.frs.vf_id = nic->vf_id;
1004
1005 return nicvf_send_msg_to_pf(nic, &mbx);
1006}
1007
1008static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1009{
1010 struct nicvf *nic = netdev_priv(netdev);
1011
1012 if (new_mtu > NIC_HW_MAX_FRS)
1013 return -EINVAL;
1014
1015 if (new_mtu < NIC_HW_MIN_FRS)
1016 return -EINVAL;
1017
1018 if (nicvf_update_hw_max_frs(nic, new_mtu))
1019 return -EINVAL;
1020 netdev->mtu = new_mtu;
1021 nic->mtu = new_mtu;
1022
1023 return 0;
1024}
1025
1026static int nicvf_set_mac_address(struct net_device *netdev, void *p)
1027{
1028 struct sockaddr *addr = p;
1029 struct nicvf *nic = netdev_priv(netdev);
1030
1031 if (!is_valid_ether_addr(addr->sa_data))
1032 return -EADDRNOTAVAIL;
1033
1034 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1035
1036 if (nic->msix_enabled)
1037 if (nicvf_hw_set_mac_addr(nic, netdev))
1038 return -EBUSY;
1039
1040 return 0;
1041}
1042
1043static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
1044{
1045 if (bgx->rx)
1046 nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
1047 else
1048 nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
1049}
1050
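/* Fetch BGX (LMAC) statistics from the PF one counter at a time over the
 * mailbox, polling bgx_stats_acked (up to ten 2 msec sleeps) for each
 * reply before moving on to the next index.
 */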
1051void nicvf_update_lmac_stats(struct nicvf *nic)
1052{
1053 int stat = 0;
1054 union nic_mbx mbx = {};
1055 int timeout;
1056
1057 if (!netif_running(nic->netdev))
1058 return;
1059
1060 mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
1061 mbx.bgx_stats.vf_id = nic->vf_id;
1062 /* Rx stats */
1063 mbx.bgx_stats.rx = 1;
1064 while (stat < BGX_RX_STATS_COUNT) {
1065 nic->bgx_stats_acked = 0;
1066 mbx.bgx_stats.idx = stat;
1067 nicvf_send_msg_to_pf(nic, &mbx);
1068 timeout = 0;
1069 while ((!nic->bgx_stats_acked) && (timeout < 10)) {
1070 msleep(2);
1071 timeout++;
1072 }
1073 stat++;
1074 }
1075
1076 stat = 0;
1077
1078 /* Tx stats */
1079 mbx.bgx_stats.rx = 0;
1080 while (stat < BGX_TX_STATS_COUNT) {
1081 nic->bgx_stats_acked = 0;
1082 mbx.bgx_stats.idx = stat;
1083 nicvf_send_msg_to_pf(nic, &mbx);
1084 timeout = 0;
1085 while ((!nic->bgx_stats_acked) && (timeout < 10)) {
1086 msleep(2);
1087 timeout++;
1088 }
1089 stat++;
1090 }
1091}
1092
1093void nicvf_update_stats(struct nicvf *nic)
1094{
1095 int qidx;
1096 struct nicvf_hw_stats *stats = &nic->stats;
1097 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1098 struct queue_set *qs = nic->qs;
1099
1100#define GET_RX_STATS(reg) \
1101 nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
1102#define GET_TX_STATS(reg) \
1103 nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
1104
1105 stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
1106 stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
1107 stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
1108 stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
1109 stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1110 stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1111 stats->rx_drop_red = GET_RX_STATS(RX_RED);
1112 stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1113 stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1114 stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1115 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1116 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1117
1118 stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
1119 stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
1120 stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
1121 stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
1122 stats->tx_drops = GET_TX_STATS(TX_DROP);
1123
1124 drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
1125 stats->rx_bcast_frames_ok +
1126 stats->rx_mcast_frames_ok;
1127 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1128 stats->tx_bcast_frames_ok +
1129 stats->tx_mcast_frames_ok;
1130 drv_stats->rx_drops = stats->rx_drop_red +
1131 stats->rx_drop_overrun;
1132 drv_stats->tx_drops = stats->tx_drops;
1133
1134 /* Update RQ and SQ stats */
1135 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1136 nicvf_update_rq_stats(nic, qidx);
1137 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1138 nicvf_update_sq_stats(nic, qidx);
1139}
1140
1141struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
1142 struct rtnl_link_stats64 *stats)
1143{
1144 struct nicvf *nic = netdev_priv(netdev);
1145 struct nicvf_hw_stats *hw_stats = &nic->stats;
1146 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1147
1148 nicvf_update_stats(nic);
1149
1150 stats->rx_bytes = hw_stats->rx_bytes_ok;
1151 stats->rx_packets = drv_stats->rx_frames_ok;
1152 stats->rx_dropped = drv_stats->rx_drops;
1153
1154 stats->tx_bytes = hw_stats->tx_bytes_ok;
1155 stats->tx_packets = drv_stats->tx_frames_ok;
1156 stats->tx_dropped = drv_stats->tx_drops;
1157
1158 return stats;
1159}
1160
1161static void nicvf_tx_timeout(struct net_device *dev)
1162{
1163 struct nicvf *nic = netdev_priv(dev);
1164
1165 if (netif_msg_tx_err(nic))
1166 netdev_warn(dev, "%s: Transmit timed out, resetting\n",
1167 dev->name);
1168
1169 schedule_work(&nic->reset_task);
1170}
1171
1172static void nicvf_reset_task(struct work_struct *work)
1173{
1174 struct nicvf *nic;
1175
1176 nic = container_of(work, struct nicvf, reset_task);
1177
1178 if (!netif_running(nic->netdev))
1179 return;
1180
1181 nicvf_stop(nic->netdev);
1182 nicvf_open(nic->netdev);
1183 nic->netdev->trans_start = jiffies;
1184}
1185
1186static const struct net_device_ops nicvf_netdev_ops = {
1187 .ndo_open = nicvf_open,
1188 .ndo_stop = nicvf_stop,
1189 .ndo_start_xmit = nicvf_xmit,
1190 .ndo_change_mtu = nicvf_change_mtu,
1191 .ndo_set_mac_address = nicvf_set_mac_address,
1192 .ndo_get_stats64 = nicvf_get_stats64,
1193 .ndo_tx_timeout = nicvf_tx_timeout,
1194};
1195
1196static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1197{
1198 struct device *dev = &pdev->dev;
1199 struct net_device *netdev;
1200 struct nicvf *nic;
1201 struct queue_set *qs;
1202 int err;
1203
1204 err = pci_enable_device(pdev);
1205 if (err) {
1206 dev_err(dev, "Failed to enable PCI device\n");
1207 return err;
1208 }
1209
1210 err = pci_request_regions(pdev, DRV_NAME);
1211 if (err) {
1212 dev_err(dev, "PCI request regions failed 0x%x\n", err);
1213 goto err_disable_device;
1214 }
1215
1216 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
1217 if (err) {
1218 dev_err(dev, "Unable to get usable DMA configuration\n");
1219 goto err_release_regions;
1220 }
1221
1222 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
1223 if (err) {
1224 dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
1225 goto err_release_regions;
1226 }
1227
1228 netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
1229 MAX_RCV_QUEUES_PER_QS,
1230 MAX_SND_QUEUES_PER_QS);
1231 if (!netdev) {
1232 err = -ENOMEM;
1233 goto err_release_regions;
1234 }
1235
1236 pci_set_drvdata(pdev, netdev);
1237
1238 SET_NETDEV_DEV(netdev, &pdev->dev);
1239
1240 nic = netdev_priv(netdev);
1241 nic->netdev = netdev;
1242 nic->pdev = pdev;
1243
1244 /* MAP VF's configuration registers */
1245 nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1246 if (!nic->reg_base) {
1247 dev_err(dev, "Cannot map config register space, aborting\n");
1248 err = -ENOMEM;
1249 goto err_free_netdev;
1250 }
1251
1252 err = nicvf_set_qset_resources(nic);
1253 if (err)
1254 goto err_free_netdev;
1255
1256 qs = nic->qs;
1257
1258 err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
1259 if (err)
1260 goto err_free_netdev;
1261
1262 /* Check if PF is alive and get MAC address for this VF */
1263 err = nicvf_register_misc_interrupt(nic);
1264 if (err)
1265 goto err_free_netdev;
1266
1267 netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
1268 NETIF_F_TSO | NETIF_F_GRO);
1269 netdev->hw_features = netdev->features;
1270
1271 netdev->netdev_ops = &nicvf_netdev_ops;
1272
1273 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1274
1275 err = register_netdev(netdev);
1276 if (err) {
1277 dev_err(dev, "Failed to register netdevice\n");
1278 goto err_unregister_interrupts;
1279 }
1280
1281 nic->msg_enable = debug;
1282
1283 nicvf_set_ethtool_ops(netdev);
1284
1285 return 0;
1286
1287err_unregister_interrupts:
1288 nicvf_unregister_interrupts(nic);
1289err_free_netdev:
1290 pci_set_drvdata(pdev, NULL);
1291 free_netdev(netdev);
1292err_release_regions:
1293 pci_release_regions(pdev);
1294err_disable_device:
1295 pci_disable_device(pdev);
1296 return err;
1297}
1298
1299static void nicvf_remove(struct pci_dev *pdev)
1300{
1301 struct net_device *netdev = pci_get_drvdata(pdev);
1302 struct nicvf *nic = netdev_priv(netdev);
1303
1304 unregister_netdev(netdev);
1305 nicvf_unregister_interrupts(nic);
1306 pci_set_drvdata(pdev, NULL);
1307 free_netdev(netdev);
1308 pci_release_regions(pdev);
1309 pci_disable_device(pdev);
1310}
1311
1312static struct pci_driver nicvf_driver = {
1313 .name = DRV_NAME,
1314 .id_table = nicvf_id_table,
1315 .probe = nicvf_probe,
1316 .remove = nicvf_remove,
1317};
1318
1319static int __init nicvf_init_module(void)
1320{
1321 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
1322
1323 return pci_register_driver(&nicvf_driver);
1324}
1325
1326static void __exit nicvf_cleanup_module(void)
1327{
1328 pci_unregister_driver(&nicvf_driver);
1329}
1330
1331module_init(nicvf_init_module);
1332module_exit(nicvf_cleanup_module);