/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

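/* Map a Qset-local queue index to the netdev queue index seen by the
 * stack. As a sketch, assuming MAX_CMP_QUEUES_PER_QS is 8: primary VF
 * queues map 1:1, while e.g. qidx 2 of secondary Qset 0 (sqs_id 0) maps
 * to netdev queue (0 + 1) * 8 + 2 = 10.
 */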
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations, which in this case are redundant and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */
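/* The mailbox is a pair of 64-bit registers, i.e. one 16-byte message;
 * it is copied in and out as two raw u64 words, and the union nic_mbx
 * overlay gives those words their per-message structure.
 */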
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

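/* Post a message and poll for the PF's response; the mailbox IRQ handler
 * (nicvf_handle_mbx_intr) flips pf_acked/pf_nacked while this loop sleeps
 * in 10 ms steps up to the 2 s timeout noted below.
 */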
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EINVAL;
		}
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		nic->mac_type = mbx.link_status.mac_type;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used during packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used during packet reception, to hand packets
		 * over to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PFC:
		nic->pfc.autoneg = mbx.pfc.autoneg;
		nic->pfc.fc_rx = mbx.pfc.fc_rx;
		nic->pfc.fc_tx = mbx.pfc.fc_tx;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

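/* Push the RSS indirection table to the PF. The table may not fit in one
 * mailbox message, so it is sent in chunks of up to
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries: the chunk at offset 0 goes as
 * RSS_CFG and the rest as RSS_CFG_CONT. E.g. assuming 8 entries per
 * message, a 128-entry table takes 16 messages.
 */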
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

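	/* e.g. an rss_size of 128 yields hash_bits = 7 */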
	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}

/* Request PF to allocate additional Qsets */
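/* As a sketch, assuming MAX_RCV_QUEUES_PER_QS is 8: with nic->rx_queues
 * of 12, the primary Qset keeps 8 RQs and the loop below hands the first
 * secondary Qset the remaining 4. Tx queues are split the same way.
 */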
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	return 0;
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int cqe_type, int budget,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_check_cqe_tx_errs(nic, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only the last segment will have
		 * an SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

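/* Process CQEs from one completion queue. RX completions are bounded by
 * the NAPI budget, while SEND completions are always drained (note the
 * budget check below skips CQE_TYPE_SEND) so TX descriptors are reclaimed
 * promptly even under RX pressure.
 */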
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	unsigned int tx_pkts = 0, tx_bytes = 0;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev,
					      (void *)cq_desc, CQE_TYPE_SEND,
					      budget, &tx_pkts, &tx_bytes);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wake up TXQ if it was stopped earlier due to the SQ being full */
	if (tx_done) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev,
					  nicvf_netdev_qidx(nic, cq_idx));
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		nic = nic->pnicvf;
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_start_queue(txq);
			this_cpu_inc(nic->drv_stats->txq_wake);
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}

static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

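/* Pin each CQ vector to its own CPU, local to the NIC's NUMA node. As a
 * sketch, assuming 8 CQ vectors on node 0: CQ0-CQ7 land on CPUs 1-8 via
 * cpumask_local_spread(), while RBDR, mailbox and Qset-error vectors stay
 * on CPU 0.
 */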
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;
	int irqnum;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;
		/* CQ interrupts */
		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irqnum = nic->msix_entries[vec].vector;
		irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
		else
			free_irq(nic->msix_entries[irq].vector, nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it is alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

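/* Hard transmit entry point. If the send queue cannot take the skb, stop
 * the txq and return NETDEV_TX_BUSY so the stack requeues the packet; the
 * CQ handler restarts the queue once completions free descriptors.
 */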
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		this_cpu_inc(nic->drv_stats->txq_stop);
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

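/* Bring-up sequence: register the misc/mailbox IRQ first so the PF can be
 * reached, then NAPI contexts, CPI/RSS/MTU configuration, per-queue IRQs
 * and queue HW init, and finally a CFG_DONE message telling the PF this
 * VF is ready.
 */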
int nicvf_open(struct net_device *netdev)
{
	int cpu, err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling and MTU */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
			goto cleanup;

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	/* Send VF config done msg to PF */
	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

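/* Update the software MTU first; if the interface is running and the PF
 * rejects the new max receive frame size, roll back and return -EINVAL.
 */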
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);
	int orig_mtu = netdev->mtu;

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
		netdev->mtu = orig_mtu;
		return -EINVAL;
	}

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx, cpu;
	u64 tmp_stats = 0;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0, the dummy SQE added for TSO notification
	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
	 * to by the dummy SQE, which results in the tx_drops counter
	 * being incremented. Subtracting it from the tx_tso counter
	 * gives the exact tx_drops count.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
					    struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;

	return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	/* MTU range: 64 - 9200 */
	netdev->min_mtu = NIC_HW_MIN_FRS;
	netdev->max_mtu = NIC_HW_MAX_FRS;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to a different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);