/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
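/* Common validation for the VF ndo entry points below: the PF must be
 * up, SR-IOV must be enabled, and vf_id must be within the range the
 * firmware reported in max_vfs.
 */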
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

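/* VF spoof-check ndo (registered in the driver's net_device_ops outside
 * this file): toggles the firmware's source MAC address check for the VF
 * via HWRM_FUNC_CFG and mirrors the result in the cached vf->flags.
 */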
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

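/* VF config query ndo: reports the locally cached VF MAC address, VLAN,
 * rate limits, spoof-check setting and link-state mode to the stack.
 */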
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

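/* VF MAC assignment ndo: caches the administratively assigned MAC
 * address and programs it as the VF's default MAC via HWRM_FUNC_CFG.
 */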
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* Reject broadcast and multicast MAC addresses.  A zero MAC
	 * address means the VF is allowed to use its own MAC address.
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

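/* VF VLAN ndo: programs a default VLAN for the VF through
 * HWRM_FUNC_CFG.  Requires HWRM spec 1.2.1 or later; a non-zero user
 * priority (qos) is not yet supported and is rejected below.
 */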
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority still needs to be
	 * implemented; for now, fail the command if a non-zero priority
	 * is given.
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

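/* VF rate-limit ndo: validates the requested min/max TX rates against
 * the current PF link speed, then programs both bounds through
 * HWRM_FUNC_CFG and caches them on success.
 */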
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

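/* VF link-state ndo: records whether the VF link should track the
 * physical link (auto) or be forced up/down.  The forced state is
 * applied when the VF queries its link; see bnxt_vf_set_link() below.
 */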
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	/* CHIMP TODO: send msg to VF to update new link state */

	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

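/* Allocate the per-VF state array and the DMA-coherent pages used as
 * HWRM request buffers for the VFs.  Each VF is carved a
 * BNXT_HWRM_REQ_MAX_SIZE slice out of a page, and a bitmap tracks
 * pending VF events (sized for up to 128 VFs).
 */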
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

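/* Register the DMA addresses of the VF HWRM request pages with the
 * firmware so it can deliver forwarded VF commands into the PF
 * driver's buffers.
 */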
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs. */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	/* TODO: the following workaround is needed to keep the total
	 * number of vf_cp_rings from exceeding the number of HW ring
	 * groups.  This WA should be removed once new HWRM provides
	 * HW ring groups capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

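/* Enable SR-IOV: determine how many VFs the remaining PF resources can
 * support (at least one RX and one TX ring per VF), allocate and reserve
 * resources for them, register the VF command buffers with the firmware,
 * and finally enable the VFs in PCI config space.
 */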
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested number of VFs.  At a
	 * minimum we require 1 RX and 1 TX ring for each VF.  In this
	 * minimum config, features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for the VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

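/* Forward an async event completion to one VF, or to all VFs when vf is
 * NULL (using the 0xffff broadcast target id), e.g. to tell assigned VFs
 * that the PF driver is unloading.
 */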
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf,
					  u16 event_id)
{
	int rc = 0;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_async_event_cmpl *async_cmpl;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type =
		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for the VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}

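/* sriov_configure entry point, invoked when the admin writes to the
 * sriov_numvfs sysfs attribute.  A sketch of how this is typically wired
 * up in the PCI driver (the registration below is illustrative, not
 * copied from this file):
 *
 *	static struct pci_driver bnxt_pci_driver = {
 *		...
 *		.sriov_configure	= bnxt_sriov_configure,
 *	};
 *
 * e.g. "echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs" lands here
 * with num_vfs == 4, and "echo 0" tears the VFs down.
 */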
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allowing SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the number of enabled VFs is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

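/* Send a prepared response back to a VF through the firmware's
 * HWRM_FWD_RESP channel, targeting the VF's response buffer and
 * completion ring.
 */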
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

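/* Validate a VF HWRM_CFA_L2_FILTER_ALLOC request: allow it only if the
 * VF has no administratively assigned MAC or is requesting exactly that
 * MAC; otherwise reject the forwarded command.
 */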
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

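/* Handle a VF HWRM_PORT_PHY_QCFG request.  If the VF link state is not
 * forced, simply forward the query to the firmware.  Otherwise, build a
 * synthetic response from the PF's cached PHY state, overriding the link
 * fields so the VF sees the forced up (reported as 10Gb full duplex) or
 * forced down state.
 */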
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link ==
			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate if the VF is allowed to change mac
		 * address, mtu, num of rings etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

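/* Process pending forwarded VF commands: scan the VF event bitmap set by
 * the async event handler, clear each pending bit, and validate and
 * forward that VF's encapsulated request.
 */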
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware.  There are 2 cases:
	 * 1. The MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. The MAC address is zero.  The VF will use a random MAC address
	 *    by default, but the stored zero MAC allows the VF user to
	 *    change the random MAC address using ndo_set_mac_address() if
	 *    desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

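/* Called on the VF to ask the PF to approve a MAC address change.
 * Firmware older than HWRM spec 1.2.2 cannot forward the request, so in
 * that case the change is only allowed while no valid MAC has been
 * administratively assigned.
 */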
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}
#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif