/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
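
/* Example module load (parameter values are illustrative, not defaults):
 *
 *   insmod thunder-nicvf.ko debug=0x3f cpi_alg=1
 *
 * 'debug' is a bitmap of netif_msg_* message levels; 'cpi_alg' selects
 * how received traffic is classified when RSS is not used.
 */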

static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses.  So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver.  The readq()/writeq()
 * functions add explicit ordering operations which are redundant in this
 * case and only add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

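/* Post a message to the PF and sleep-poll until the mailbox interrupt
 * handler flags an ACK or NACK, giving up after NIC_MBOX_MSG_TIMEOUT.
 * Must be called from a context that is allowed to sleep.
 */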
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EINVAL;
		}
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

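/* Mailbox interrupt handler: read the 128-bit message posted by the PF
 * and dispatch on its type.  ACK/NACK results are recorded in the nicvf
 * struct for nicvf_send_msg_to_pf() to pick up.
 */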
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used during packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used during packet reception, to hand packets
		 * over to the primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

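/* Push the RSS indirection table to the PF.  The table may not fit in a
 * single mailbox message, so it is sent in chunks of
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries, using a continuation message
 * type for every chunk after the first.
 */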
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

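/* Initialize receive side scaling: query the indirection table size
 * from the PF, program a random hash key and hash configuration, and
 * spread the indirection table entries across the Rx queues.  RSS is
 * left disabled when a CPI algorithm is selected, since traffic
 * steering is then done via the CPI configuration instead.
 */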
static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}

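/* Handle a send-completion CQE: reclaim the send queue descriptors of
 * the completed packet (including the dummy descriptors used for TSO
 * offload) and free the skb via napi_consume_skb().  Packet and byte
 * counts are accumulated for the caller's BQL accounting.
 */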
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx,
				  int cqe_type, int budget,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only last segment will have
		 * a SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

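/* Handle a received-packet CQE: build an skb from the receive buffers,
 * drop it if the hardware flagged an error, then attach hash, queue,
 * checksum and VLAN metadata before handing it to GRO or the regular
 * receive path.  On a secondary Qset the primary VF's netdev is used,
 * as only the primary VF is registered with the stack.
 */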
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

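/* Process up to 'budget' CQEs of a completion queue.  Rx and Tx
 * completions share the same CQ; Tx completions are drained even after
 * the Rx budget is exhausted so that stopped transmit queues can be
 * woken.  Returns the number of Rx packets processed.
 */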
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	unsigned int tx_pkts = 0, tx_bytes = 0;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND,
					      budget, &tx_pkts, &tx_bytes);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wake up the TXQ if it was stopped earlier due to the SQ being full */
	if (tx_done) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev,
					  nicvf_netdev_qidx(nic, cq_idx));
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		nic = nic->pnicvf;
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_start_queue(txq);
			nic->drv_stats.txq_wake++;
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}

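/* NAPI poll handler.  If the budget was not exhausted, polling is
 * stopped: the CQ head pointer is written back and the CQ interrupt is
 * re-enabled so the next completion raises an interrupt again.
 */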
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;
	int irqnum;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;
		/* CQ interrupts */
		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irqnum = nic->msix_entries[vec].vector;
		irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
	}
}

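/* Name and request all MSI-X vectors except the mailbox one: one per
 * completion queue (passed to its NAPI context), one per RBDR, and one
 * for Qset errors.  Affinity hints then spread the CQ vectors across
 * CPUs, leaving CPU0 to RBDR and error interrupts.
 */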
static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
		else
			free_irq(nic->msix_entries[irq].vector, nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

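/* Transmit entry point.  If the send queue cannot accept the skb, the
 * queue is stopped and NETDEV_TX_BUSY is returned so the stack requeues
 * the packet; the queue is woken from the CQ handler once descriptors
 * have been reclaimed.
 */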
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.txq_stop++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

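/* Bring the interface down: notify the PF, tear down secondary Qsets
 * first, quiesce interrupts, NAPI contexts and tasklets, then free the
 * queue resources and disable the hardware Qset.
 */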
int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Tear down secondary Qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ interrupt is re-enabled in napi_complete(),
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}

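/* Bring the interface up: register the mailbox interrupt, set up NAPI
 * contexts, configure CPI/RSS and any secondary Qsets, register the
 * data-path interrupts, initialize the queues, and finally enable the
 * completion, RBDR and Qset-error interrupts.
 */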
int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling */
	if (!nic->sqs_mode)
		nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

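/* Ask the PF to program the maximum receive frame size for this VF */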
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}

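/* Fetch the BGX (LMAC) statistics from the PF, one counter per mailbox
 * round trip; results arrive via NIC_MBOX_MSG_BGX_STATS replies.
 */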
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_frames_ok = stats->rx_ucast_frames +
				  stats->rx_bcast_frames +
				  stats->rx_mcast_frames;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	nic->drv_stats.tx_timeout++;
	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
};

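/* PCI probe: map the VF's configuration BAR, size the Qset resources,
 * verify over the mailbox that the PF is alive, and register the
 * net_device.  A VF operating as a secondary Qset returns early and
 * never registers a net_device of its own.
 */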
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to a different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);