/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
        struct i40e_pf *pf = vf->pf;

        return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
                                            u8 qid)
{
        struct i40e_pf *pf = vf->pf;

        return qid < pf->vsi[vsi_id]->num_queue_pairs;
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
        struct i40e_pf *pf = vf->pf;

        return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
                                   u8 vsi_queue_id)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = pf->vsi[vsi_idx];
        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

        if (le16_to_cpu(vsi->info.mapping_flags) &
            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
                pf_queue_id =
                        le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
        else
                pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
                              vsi_queue_id;

        return pf_queue_id;
}
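
/*
 * Worked example for the mapping above (illustrative values): for a VSI
 * using the contiguous scheme with queue_mapping[0] == 64, VSI queue 3
 * resolves to PF queue 64 + 3 = 67; with I40E_AQ_VSI_QUE_MAP_NONCONTIG
 * set, queue_mapping[3] is read directly instead.
 */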

/**
 * i40e_ctrl_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
                                  u16 vsi_queue_id,
                                  enum i40e_queue_ctrl ctrl)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        bool writeback = false;
        u16 pf_queue_id;
        int ret = 0;
        u32 reg;

        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
        reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));

        switch (ctrl) {
        case I40E_QUEUE_CTRL_ENABLE:
                reg |= I40E_QTX_ENA_QENA_REQ_MASK;
                writeback = true;
                break;
        case I40E_QUEUE_CTRL_ENABLECHECK:
                ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
                break;
        case I40E_QUEUE_CTRL_DISABLE:
                reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
                writeback = true;
                break;
        case I40E_QUEUE_CTRL_DISABLECHECK:
                ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
                break;
        case I40E_QUEUE_CTRL_FASTDISABLE:
                reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
                writeback = true;
                break;
        case I40E_QUEUE_CTRL_FASTDISABLECHECK:
                ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
                if (!ret) {
                        reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
                        writeback = true;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (writeback) {
                wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
                i40e_flush(hw);
        }

        return ret;
}
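
/*
 * Note on the QTX_ENA handshake used above: QENA_REQ is the bit software
 * writes to request a state change and QENA_STAT is the bit hardware
 * updates once the queue has actually transitioned, which is why the
 * *CHECK operations only read the register.  Callers delay briefly
 * between the request and the check (see the udelay() in i40e_reset_vf()).
 */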

/**
 * i40e_ctrl_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
                                  u16 vsi_queue_id,
                                  enum i40e_queue_ctrl ctrl)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        bool writeback = false;
        u16 pf_queue_id;
        int ret = 0;
        u32 reg;

        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
        reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));

        switch (ctrl) {
        case I40E_QUEUE_CTRL_ENABLE:
                reg |= I40E_QRX_ENA_QENA_REQ_MASK;
                writeback = true;
                break;
        case I40E_QUEUE_CTRL_ENABLECHECK:
                ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
                break;
        case I40E_QUEUE_CTRL_DISABLE:
                reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
                writeback = true;
                break;
        case I40E_QUEUE_CTRL_DISABLECHECK:
                ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
                break;
        case I40E_QUEUE_CTRL_FASTDISABLE:
                reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
                writeback = true;
                break;
        case I40E_QUEUE_CTRL_FASTDISABLECHECK:
                ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
                if (!ret) {
                        reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
                        writeback = true;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (writeback) {
                wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
                i40e_flush(hw);
        }

        return ret;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                                      struct i40e_virtchnl_vector_map *vecmap)
{
        unsigned long linklistmap = 0, tempmap;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u16 vsi_queue_id, pf_queue_id;
        enum i40e_queue_type qtype;
        u16 next_q, vector_id;
        u32 reg, reg_idx;
        u16 itr_idx = 0;

        vector_id = vecmap->vector_id;
        /* setup the head */
        if (0 == vector_id)
                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
        else
                reg_idx = I40E_VPINT_LNKLSTN(
                              (pf->hw.func_caps.num_msix_vectors_vf
                               * vf->vf_id) + (vector_id - 1));

        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
                /* Special case - No queues mapped on this vector */
                wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
                goto irq_list_done;
        }
        tempmap = vecmap->rxq_map;
        vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (vsi_queue_id < I40E_MAX_VSI_QP) {
                linklistmap |= (1 <<
                                (I40E_VIRTCHNL_SUPPORTED_QTYPES *
                                 vsi_queue_id));
                vsi_queue_id =
                    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
        }

        tempmap = vecmap->txq_map;
        vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (vsi_queue_id < I40E_MAX_VSI_QP) {
                linklistmap |= (1 <<
                                (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
                                 + 1));
                vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                             vsi_queue_id + 1);
        }

        next_q = find_first_bit(&linklistmap,
                                (I40E_MAX_VSI_QP *
                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

        wr32(hw, reg_idx, reg);

        while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
                switch (qtype) {
                case I40E_QUEUE_TYPE_RX:
                        reg_idx = I40E_QINT_RQCTL(pf_queue_id);
                        itr_idx = vecmap->rxitr_idx;
                        break;
                case I40E_QUEUE_TYPE_TX:
                        reg_idx = I40E_QINT_TQCTL(pf_queue_id);
                        itr_idx = vecmap->txitr_idx;
                        break;
                default:
                        break;
                }

                next_q = find_next_bit(&linklistmap,
                                       (I40E_MAX_VSI_QP *
                                        I40E_VIRTCHNL_SUPPORTED_QTYPES),
                                       next_q + 1);
                if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
                                                              vsi_queue_id);
                } else {
                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
                        qtype = 0;
                }

                /* format for the RQCTL & TQCTL regs is same */
                reg = (vector_id) |
                      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
                      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
                wr32(hw, reg_idx, reg);
        }

irq_list_done:
        i40e_flush(hw);
}
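
/*
 * Encoding example for linklistmap above (illustrative values): each VSI
 * queue pair owns I40E_VIRTCHNL_SUPPORTED_QTYPES adjacent bits, rx at the
 * even bit and tx at the odd bit.  With rxq_map = 0x5 (rx queues 0 and 2)
 * and txq_map = 0x2 (tx queue 1), linklistmap becomes 0x19 (bits 0, 3, 4),
 * so the hardware linked list is walked rx0 -> tx1 -> rx2 and terminated
 * with I40E_QUEUE_END_OF_LIST.
 */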

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_txq_info *info)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_txq tx_ctx;
        u16 pf_queue_id;
        u32 qtx_ctl;
        int ret = 0;

        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

        /* clear the context structure first */
        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

        /* only set the required fields */
        tx_ctx.base = info->dma_ring_addr / 128;
        tx_ctx.qlen = info->ring_len;
        tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
        tx_ctx.rdylist_act = 0;

        /* clear the context in the HMC */
        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to clear VF LAN Tx queue context %d, error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_context;
        }

        /* set the context in the HMC */
        ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to set VF LAN Tx queue context %d error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_context;
        }

        /* associate this queue with the PCI VF function */
        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
        qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
                    & I40E_QTX_CTL_PF_INDX_MASK);
        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
                    & I40E_QTX_CTL_VFVM_INDX_MASK);
        wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
        i40e_flush(hw);

error_context:
        return ret;
}
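
/*
 * Note on the Tx context programmed above: the HMC takes the ring base
 * address in 128-byte units (hence the division of dma_ring_addr by 128),
 * and QTX_CTL ties the queue to this VF's absolute function number,
 * vf_id + vf_base_id, so the hardware can attribute the queue's DMA to
 * the right PCI function.
 */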

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_rxq_info *info)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_rxq rx_ctx;
        u16 pf_queue_id;
        int ret = 0;

        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

        /* only set the required fields */
        rx_ctx.base = info->dma_ring_addr / 128;
        rx_ctx.qlen = info->ring_len;

        if (info->splithdr_enabled) {
                rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
                                  I40E_RX_SPLIT_IP      |
                                  I40E_RX_SPLIT_TCP_UDP |
                                  I40E_RX_SPLIT_SCTP;
                /* header length validation */
                if (info->hdr_size > ((2 * 1024) - 64)) {
                        ret = -EINVAL;
                        goto error_param;
                }
                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

                /* set splitalways mode 10b */
                rx_ctx.dtype = 0x2;
        }

        /* databuffer length validation */
        if (info->databuffer_size > ((16 * 1024) - 128)) {
                ret = -EINVAL;
                goto error_param;
        }
        rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

        /* max pkt. length validation */
        if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
                ret = -EINVAL;
                goto error_param;
        }
        rx_ctx.rxmax = info->max_pkt_size;

        /* enable 32bytes desc always */
        rx_ctx.dsize = 1;

        /* default values */
        rx_ctx.tphrdesc_ena = 1;
        rx_ctx.tphwdesc_ena = 1;
        rx_ctx.tphdata_ena = 1;
        rx_ctx.tphhead_ena = 1;
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;

        /* clear the context in the HMC */
        ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to clear VF LAN Rx queue context %d, error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_param;
        }

        /* set the context in the HMC */
        ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to set VF LAN Rx queue context %d error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_param;
        }

error_param:
        return ret;
}
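
/*
 * The bounds enforced above follow the rx queue context as used here:
 * header buffers up to 2KB - 64 (programmed in units shifted by
 * I40E_RXQ_CTX_HBUFF_SHIFT), data buffers up to 16KB - 128 (shifted by
 * I40E_RXQ_CTX_DBUFF_SHIFT), and a max packet size of at least 64 bytes
 * and strictly less than 16KB.
 */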

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
        struct i40e_mac_filter *f = NULL;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vsi *vsi;
        int ret = 0;

        vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

        if (!vsi) {
                dev_err(&pf->pdev->dev,
                        "add vsi failed for vf %d, aq_err %d\n",
                        vf->vf_id, pf->hw.aq.asq_last_status);
                ret = -ENOENT;
                goto error_alloc_vsi_res;
        }
        if (type == I40E_VSI_SRIOV) {
                vf->lan_vsi_index = vsi->idx;
                vf->lan_vsi_id = vsi->id;
                dev_info(&pf->pdev->dev,
                         "LAN VSI index %d, VSI id %d\n",
                         vsi->idx, vsi->id);
                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
                                    0, true, false);
        }

        if (!f) {
                dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
                ret = -ENOMEM;
                goto error_alloc_vsi_res;
        }

        /* program mac filter */
        ret = i40e_sync_vsi_filters(vsi);
        if (ret) {
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
                goto error_alloc_vsi_res;
        }

        /* accept bcast pkts. by default */
        ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
                        vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
                ret = -EINVAL;
        }

error_alloc_vsi_res:
        return ret;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
int i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
        int ret = -ENOENT;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg, reg_idx, msix_vf;
        bool rsd = false;
        u16 pf_queue_id;
        int i, j;

        /* warn the VF */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);

        clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

        /* PF triggers VFR only when VF requests, in case of
         * VFLR, HW triggers VFR
         */
        if (!flr) {
                /* reset vf using VPGEN_VFRTRIG reg */
                reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
                i40e_flush(hw);
        }

        /* poll VPGEN_VFRSTAT reg to make sure
         * that reset is complete
         */
        for (i = 0; i < 4; i++) {
                /* vf reset requires driver to first reset the
                 * vf and then poll the status register to make sure
                 * that the requested op was completed
                 * successfully
                 */
                udelay(10);
                reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
                if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
                        rsd = true;
                        break;
                }
        }

        if (!rsd)
                dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
                        vf->vf_id);

        /* fast disable qps */
        for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
                ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
                                             I40E_QUEUE_CTRL_FASTDISABLE);
                ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
                                             I40E_QUEUE_CTRL_FASTDISABLE);
        }

        /* Queue enable/disable requires driver to
         * first disable the queue and then poll the status register
         * to make sure that the requested op was completed
         * successfully
         */
        udelay(10);
        for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
                ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
                                             I40E_QUEUE_CTRL_FASTDISABLECHECK);
                if (ret)
                        dev_info(&pf->pdev->dev,
                                 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
                                 j, vf->lan_vsi_index, vf->vf_id);
                ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
                                             I40E_QUEUE_CTRL_FASTDISABLECHECK);
                if (ret)
                        dev_info(&pf->pdev->dev,
                                 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
                                 j, vf->lan_vsi_index, vf->vf_id);
        }

        /* clear the irq settings */
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
        for (i = 0; i < msix_vf; i++) {
                /* format is same for both registers */
                if (0 == i)
                        reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
                else
                        reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
                                                      (vf->vf_id))
                                                     + (i - 1));
                reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
                       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
                wr32(hw, reg_idx, reg);
                i40e_flush(hw);
        }
        /* disable interrupts so the VF starts in a known state */
        for (i = 0; i < msix_vf; i++) {
                /* format is same for both registers */
                if (0 == i)
                        reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
                else
                        reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
                                                       (vf->vf_id))
                                                      + (i - 1));
                wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
                i40e_flush(hw);
        }

        /* set the defaults for the rqctl & tqctl registers */
        reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
               I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
        for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
                pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
                wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
                wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
        }

        /* clear the reset bit in the VPGEN_VFRTRIG reg */
        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
        reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
        i40e_flush(hw);

        return ret;
}
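
/*
 * Reset sequencing recap for i40e_reset_vf() above: VFGEN_RSTAT1 is the
 * VF-visible progress word (INPROGRESS -> COMPLETED), VPGEN_VFRTRIG
 * requests the software reset when a VFLR has not already triggered one,
 * and VPGEN_VFRSTAT is polled (up to 4 * 10us here) for completion before
 * the queues, interrupt link lists and queue-cause registers are put back
 * to their defaults.
 */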

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg, total_queue_pairs = 0;
        int j;

        /* Tell the hardware we're using noncontiguous mapping. HW requires
         * that VF queues be mapped using this method, even when they are
         * contiguous in real life
         */
        wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
             I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

        /* enable VF vplan_qtable mappings */
        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

        /* map PF queues to VF queues */
        for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
                u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
                total_queue_pairs++;
        }

        /* map PF queues to VSI */
        for (j = 0; j < 7; j++) {
                if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
                        reg = 0x07FF07FF;       /* unused */
                } else {
                        u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
                                                          j * 2);
                        reg = qid;
                        qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
                                                      (j * 2) + 1);
                        reg |= qid << 16;
                }
                wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
        }

        i40e_flush(hw);
}
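
/*
 * Layout note for the VSILAN_QTABLE loop above: each register packs two
 * PF queue indexes, the even-numbered VSI queue in the low half and the
 * odd-numbered one starting at bit 16, with 0x7FF in a half marking an
 * unused slot (hence the 0x07FF07FF fill).
 */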

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        int i;

        /* disable qp mappings */
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
        for (i = 0; i < I40E_MAX_VSI_QP; i++)
                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
                     I40E_QUEUE_END_OF_LIST);
        i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;

        /* free vsi & disconnect it from the parent uplink */
        if (vf->lan_vsi_index) {
                i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
                vf->lan_vsi_index = 0;
                vf->lan_vsi_id = 0;
        }

        /* reset some of the state variables keeping
         * track of the resources
         */
        vf->num_queue_pairs = 0;
        vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        int total_queue_pairs = 0;
        int ret;

        /* allocate hw vsi context & associated resources */
        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
        if (ret)
                goto error_alloc;
        total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

        /* store the total qps number for the runtime
         * vf req validation
         */
        vf->num_queue_pairs = total_queue_pairs;

        /* vf is now completely initialized */
        set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
        if (ret)
                i40e_free_vf_res(vf);

        return ret;
}

/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
        struct pci_dev *pdev = pf->pdev;
        struct pci_dev *vfdev;

        /* loop through all the VFs to see if we own any that are assigned */
        vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
        while (vfdev) {
                /* if we don't own it we don't care */
                if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
                        /* if it is assigned we cannot release it */
                        if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                return true;
                }

                vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
                                       I40E_VF_DEVICE_ID,
                                       vfdev);
        }

        return false;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        int i;

        if (!pf->vf)
                return;

        /* Disable interrupt 0 so we don't try to handle the VFLR. */
        wr32(hw, I40E_PFINT_DYN_CTL0, 0);
        i40e_flush(hw);

        /* free up vf resources */
        for (i = 0; i < pf->num_alloc_vfs; i++) {
                if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
                        i40e_free_vf_res(&pf->vf[i]);
                /* disable qp mappings */
                i40e_disable_vf_mappings(&pf->vf[i]);
        }

        kfree(pf->vf);
        pf->vf = NULL;
        pf->num_alloc_vfs = 0;

        if (!i40e_vfs_are_assigned(pf))
                pci_disable_sriov(pf->pdev);
        else
                dev_warn(&pf->pdev->dev,
                         "unable to disable SR-IOV because VFs are assigned.\n");

        /* Re-enable interrupt 0. */
        wr32(hw, I40E_PFINT_DYN_CTL0,
             I40E_PFINT_DYN_CTL0_INTENA_MASK |
             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
             (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
        i40e_flush(hw);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
        struct i40e_vf *vfs;
        int i, ret = 0;

        ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "pci_enable_sriov failed with error %d!\n", ret);
                pf->num_alloc_vfs = 0;
                goto err_iov;
        }

        /* allocate memory */
        vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
        if (!vfs) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        /* apply default profile */
        for (i = 0; i < num_alloc_vfs; i++) {
                vfs[i].pf = pf;
                vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
                vfs[i].vf_id = i;

                /* assign default capabilities */
                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);

                ret = i40e_alloc_vf_res(&vfs[i]);
                i40e_reset_vf(&vfs[i], true);
                if (ret)
                        break;

                /* enable vf vplan_qtable mappings */
                i40e_enable_vf_mappings(&vfs[i]);
        }
        pf->vf = vfs;
        pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
        if (ret)
                i40e_free_vfs(pf);
err_iov:
        return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        int pre_existing_vfs = pci_num_vf(pdev);
        int err = 0;

        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                i40e_free_vfs(pf);
        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
                goto out;

        if (num_vfs > pf->num_req_vfs) {
                err = -EPERM;
                goto err_out;
        }

        err = i40e_alloc_vfs(pf, num_vfs);
        if (err) {
                dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
                goto err_out;
        }

out:
        return num_vfs;

err_out:
        return err;
#endif
        return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);

        if (num_vfs)
                return i40e_pci_sriov_enable(pdev, num_vfs);

        i40e_free_vfs(pf);
        return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                                  u32 v_retval, u8 *msg, u16 msglen)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        i40e_status aq_ret;

        /* single place to detect unsuccessful return values */
        if (v_retval) {
                vf->num_invalid_msgs++;
                dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
                        v_opcode, v_retval);
                if (vf->num_invalid_msgs >
                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
                        dev_err(&pf->pdev->dev,
                                "Number of invalid messages exceeded for VF %d\n",
                                vf->vf_id);
                        dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
                        set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
                }
        } else {
                vf->num_valid_msgs++;
        }

        aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
                                        msg, msglen, NULL);
        if (aq_ret) {
                dev_err(&pf->pdev->dev,
                        "Unable to send the message to VF %d aq_err %d\n",
                        vf->vf_id, pf->hw.aq.asq_last_status);
                return -EIO;
        }

        return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
                                   enum i40e_virtchnl_ops opcode,
                                   i40e_status retval)
{
        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
        struct i40e_virtchnl_version_info info = {
                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
        };

        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
                                      I40E_SUCCESS, (u8 *)&info,
                                      sizeof(struct i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
        struct i40e_virtchnl_vf_resource *vfres = NULL;
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
        int i = 0, len = 0;
        int num_vsis = 1;
        int ret;

        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }

        len = (sizeof(struct i40e_virtchnl_vf_resource) +
               sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

        vfres = kzalloc(len, GFP_KERNEL);
        if (!vfres) {
                aq_ret = I40E_ERR_NO_MEMORY;
                len = 0;
                goto err;
        }

        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
        vsi = pf->vsi[vf->lan_vsi_index];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
        if (vf->lan_vsi_index) {
                vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
                vfres->vsi_res[i].num_queue_pairs =
                    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
                memcpy(vfres->vsi_res[i].default_mac_addr,
                       vf->default_lan_addr.addr, ETH_ALEN);
                i++;
        }
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
        /* send the response back to the vf */
        ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
                                     aq_ret, (u8 *)vfres, len);

        kfree(vfres);
        return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
                return -ENOENT;

        return i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
                                               u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_promisc_info *info =
            (struct i40e_virtchnl_promisc_info *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        bool allmulti = false;
        bool promisc = false;
        i40e_status aq_ret;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
            (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
                promisc = true;
        aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
                                                     promisc, NULL);
        if (aq_ret)
                goto error_param;

        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
                                                       allmulti, NULL);

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf,
                                       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
                                       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_vsi_queue_config_info *qci =
            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
        struct i40e_virtchnl_queue_pair_info *qpi;
        u16 vsi_id, vsi_queue_id;
        i40e_status aq_ret = 0;
        int i;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        vsi_id = qci->vsi_id;
        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
        for (i = 0; i < qci->num_queue_pairs; i++) {
                qpi = &qci->qpair[i];
                vsi_queue_id = qpi->txq.queue_id;
                if ((qpi->txq.vsi_id != vsi_id) ||
                    (qpi->rxq.vsi_id != vsi_id) ||
                    (qpi->rxq.queue_id != vsi_queue_id) ||
                    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }

                if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
                                             &qpi->rxq) ||
                    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
                                             &qpi->txq)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
        }

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                                       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_irq_map_info *irqmap_info =
            (struct i40e_virtchnl_irq_map_info *)msg;
        struct i40e_virtchnl_vector_map *map;
        u16 vsi_id, vsi_queue_id, vector_id;
        i40e_status aq_ret = 0;
        unsigned long tempmap;
        int i;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        for (i = 0; i < irqmap_info->num_vectors; i++) {
                map = &irqmap_info->vecmap[i];

                vector_id = map->vector_id;
                vsi_id = map->vsi_id;
                /* validate msg params */
                if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
                    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }

                /* look out for an invalid queue index */
                tempmap = map->rxq_map;
                vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
                while (vsi_queue_id < I40E_MAX_VSI_QP) {
                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
                                                      vsi_queue_id)) {
                                aq_ret = I40E_ERR_PARAM;
                                goto error_param;
                        }
                        vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                                     vsi_queue_id + 1);
                }

                tempmap = map->txq_map;
                vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
                while (vsi_queue_id < I40E_MAX_VSI_QP) {
                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
                                                      vsi_queue_id)) {
                                aq_ret = I40E_ERR_PARAM;
                                goto error_param;
                        }
                        vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                                     vsi_queue_id + 1);
                }

                i40e_config_irq_link_list(vf, vsi_id, map);
        }
error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                       aq_ret);
}
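
/*
 * Validation example for the map walks above (illustrative values): the
 * rxq_map/txq_map fields are treated as queue bitmaps, so a txq_map of
 * 0x9 makes find_first_bit()/find_next_bit() visit VSI queues 0 and 3,
 * each of which must pass i40e_vc_isvalid_queue_id() before the vector
 * is linked.
 */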

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
        struct i40e_pf *pf = vf->pf;
        u16 vsi_id = vqs->vsi_id;
        i40e_status aq_ret = 0;
        unsigned long tempmap;
        u16 queue_id;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        tempmap = vqs->rx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
                i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
                                       I40E_QUEUE_CTRL_ENABLE);

                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

        tempmap = vqs->tx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
                i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
                                       I40E_QUEUE_CTRL_ENABLE);

                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

        /* Poll the status register to make sure that the
         * requested op was completed successfully
         */
        udelay(10);

        tempmap = vqs->rx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
                                           I40E_QUEUE_CTRL_ENABLECHECK)) {
                        dev_err(&pf->pdev->dev,
                                "Queue control check failed on RX queue %d of VSI %d VF %d\n",
                                queue_id, vsi_id, vf->vf_id);
                }
                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

        tempmap = vqs->tx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
                                           I40E_QUEUE_CTRL_ENABLECHECK)) {
                        dev_err(&pf->pdev->dev,
                                "Queue control check failed on TX queue %d of VSI %d VF %d\n",
                                queue_id, vsi_id, vf->vf_id);
                }
                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                                       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
        struct i40e_pf *pf = vf->pf;
        u16 vsi_id = vqs->vsi_id;
        i40e_status aq_ret = 0;
        unsigned long tempmap;
        u16 queue_id;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        tempmap = vqs->rx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
                i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
                                       I40E_QUEUE_CTRL_DISABLE);

                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

        tempmap = vqs->tx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
                i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
                                       I40E_QUEUE_CTRL_DISABLE);

                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

        /* Poll the status register to make sure that the
         * requested op was completed successfully
         */
        udelay(10);

        tempmap = vqs->rx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
                                           I40E_QUEUE_CTRL_DISABLECHECK)) {
                        dev_err(&pf->pdev->dev,
                                "Queue control check failed on RX queue %d of VSI %d VF %d\n",
                                queue_id, vsi_id, vf->vf_id);
                }
                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

        tempmap = vqs->tx_queues;
        queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
        while (queue_id < I40E_MAX_VSI_QP) {
                if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
                                           I40E_QUEUE_CTRL_DISABLECHECK)) {
                        dev_err(&pf->pdev->dev,
                                "Queue control check failed on TX queue %d of VSI %d VF %d\n",
                                queue_id, vsi_id, vf->vf_id);
                }
                queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
                                         queue_id + 1);
        }

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
                                       aq_ret);
}

/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_eth_stats stats;
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;

        memset(&stats, 0, sizeof(struct i40e_eth_stats));

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        vsi = pf->vsi[vqs->vsi_id];
        if (!vsi) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
        i40e_update_eth_stats(vsi);
        memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));

error_param:
        /* send the response back to the vf */
        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
                                      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_ether_addr_list *al =
            (struct i40e_virtchnl_ether_addr_list *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = NULL;
        u16 vsi_id = al->vsi_id;
        i40e_status aq_ret = 0;
        int i;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        for (i = 0; i < al->num_elements; i++) {
                if (is_broadcast_ether_addr(al->list[i].addr) ||
                    is_zero_ether_addr(al->list[i].addr)) {
                        dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
                                al->list[i].addr);
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
        }
        vsi = pf->vsi[vsi_id];

        /* add new addresses to the list */
        for (i = 0; i < al->num_elements; i++) {
                struct i40e_mac_filter *f;

                f = i40e_find_mac(vsi, al->list[i].addr, true, false);
                if (!f) {
                        if (i40e_is_vsi_in_vlan(vsi))
                                f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
                                                         true, false);
                        else
                                f = i40e_add_filter(vsi, al->list[i].addr, -1,
                                                    true, false);
                }

                if (!f) {
                        dev_err(&pf->pdev->dev,
                                "Unable to add VF MAC filter\n");
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
        }

        /* program the updated filter list */
        if (i40e_sync_vsi_filters(vsi))
                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                                       aq_ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_ether_addr_list *al =
            (struct i40e_virtchnl_ether_addr_list *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = NULL;
        u16 vsi_id = al->vsi_id;
        i40e_status aq_ret = 0;
        int i;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
        vsi = pf->vsi[vsi_id];

        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
                i40e_del_filter(vsi, al->list[i].addr,
                                I40E_VLAN_ANY, true, false);

        /* program the updated filter list */
        if (i40e_sync_vsi_filters(vsi))
                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                                       aq_ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_vlan_filter_list *vfl =
            (struct i40e_virtchnl_vlan_filter_list *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = NULL;
        u16 vsi_id = vfl->vsi_id;
        i40e_status aq_ret = 0;
        int i;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        for (i = 0; i < vfl->num_elements; i++) {
                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
                        aq_ret = I40E_ERR_PARAM;
                        dev_err(&pf->pdev->dev,
                                "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
                        goto error_param;
                }
        }
        vsi = pf->vsi[vsi_id];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        i40e_vlan_stripping_enable(vsi);
        for (i = 0; i < vfl->num_elements; i++) {
                /* add new VLAN filter */
                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
                if (ret)
                        dev_err(&pf->pdev->dev,
                                "Unable to add VF vlan filter %d, error %d\n",
                                vfl->vlan_id[i], ret);
        }

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
        struct i40e_virtchnl_vlan_filter_list *vfl =
            (struct i40e_virtchnl_vlan_filter_list *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = NULL;
        u16 vsi_id = vfl->vsi_id;
        i40e_status aq_ret = 0;
        int i;

        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        for (i = 0; i < vfl->num_elements; i++) {
                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
                        aq_ret = I40E_ERR_PARAM;
                        goto error_param;
                }
        }

        vsi = pf->vsi[vsi_id];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }

        for (i = 0; i < vfl->num_elements; i++) {
                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
                if (ret)
                        dev_err(&pf->pdev->dev,
                                "Unable to delete VF vlan filter %d, error %d\n",
                                vfl->vlan_id[i], ret);
        }

error_param:
        /* send the response to the vf */
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtchnl operation code
 * @v_retval: virtchnl return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                                   u32 v_retval, u8 *msg, u16 msglen)
{
        bool err_msg_format = false;
        int valid_len;

        /* Check if VF is disabled. */
        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
                return I40E_ERR_PARAM;

        /* Validate message length. */
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                valid_len = 0;
                break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_rxq_info);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
                if (msglen >= valid_len) {
                        struct i40e_virtchnl_vsi_queue_config_info *vqc =
                            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
                        valid_len += (vqc->num_queue_pairs *
                                      sizeof(struct
                                             i40e_virtchnl_queue_pair_info));
                        if (vqc->num_queue_pairs == 0)
                                err_msg_format = true;
                }
                break;
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
                if (msglen >= valid_len) {
                        struct i40e_virtchnl_irq_map_info *vimi =
                            (struct i40e_virtchnl_irq_map_info *)msg;
                        valid_len += (vimi->num_vectors *
                                      sizeof(struct i40e_virtchnl_vector_map));
                        if (vimi->num_vectors == 0)
                                err_msg_format = true;
                }
                break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                valid_len = sizeof(struct i40e_virtchnl_queue_select);
                break;
        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
                valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
                if (msglen >= valid_len) {
                        struct i40e_virtchnl_ether_addr_list *veal =
                            (struct i40e_virtchnl_ether_addr_list *)msg;
                        valid_len += veal->num_elements *
                            sizeof(struct i40e_virtchnl_ether_addr);
                        if (veal->num_elements == 0)
                                err_msg_format = true;
                }
                break;
        case I40E_VIRTCHNL_OP_ADD_VLAN:
        case I40E_VIRTCHNL_OP_DEL_VLAN:
                valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
                if (msglen >= valid_len) {
                        struct i40e_virtchnl_vlan_filter_list *vfl =
                            (struct i40e_virtchnl_vlan_filter_list *)msg;
                        valid_len += vfl->num_elements * sizeof(u16);
                        if (vfl->num_elements == 0)
                                err_msg_format = true;
                }
                break;
        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
                valid_len = sizeof(struct i40e_virtchnl_promisc_info);
                break;
        case I40E_VIRTCHNL_OP_GET_STATS:
                valid_len = sizeof(struct i40e_virtchnl_queue_select);
                break;
        /* These are always errors coming from the VF. */
        case I40E_VIRTCHNL_OP_EVENT:
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                return -EPERM;
        }
        /* few more checks */
        if ((valid_len != msglen) || (err_msg_format)) {
                i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
                return -EINVAL;
        } else {
                return 0;
        }
}
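
/*
 * Sizing example for the variable-length opcodes above (illustrative
 * count): an I40E_VIRTCHNL_OP_DEL_VLAN message carrying three VLAN ids
 * must be exactly sizeof(struct i40e_virtchnl_vlan_filter_list) +
 * 3 * sizeof(u16) bytes long; anything shorter or longer is answered
 * with I40E_ERR_PARAM.
 */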
1886
1887/**
1888 * i40e_vc_process_vf_msg
1889 * @pf: pointer to the pf structure
1890 * @vf_id: source vf id
1891 * @msg: pointer to the msg buffer
1892 * @msglen: msg length
1893 * @msghndl: msg handle
1894 *
1895 * called from the common aeq/arq handler to
1896 * process request from vf
1897 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen)
{
        struct i40e_vf *vf = &(pf->vf[vf_id]);
        struct i40e_hw *hw = &pf->hw;
        int ret;

        pf->vf_aq_requests++;
        /* perform basic checks on the msg */
        ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

        if (ret) {
                dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
                return ret;
        }
        wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
                ret = i40e_vc_get_version_msg(vf);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                ret = i40e_vc_get_vf_resources_msg(vf);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                ret = i40e_vc_reset_vf_msg(vf);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
                ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                ret = i40e_vc_config_queues_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
                ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
                ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_ADD_VLAN:
                ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_DEL_VLAN:
                ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_GET_STATS:
                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                dev_err(&pf->pdev->dev,
                        "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
                                              I40E_ERR_NOT_IMPLEMENTED);
                break;
        }

        return ret;
}
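
/*
 * Sketch of how a caller is expected to feed this dispatcher from the PF
 * admin receive queue. The event variable, its field names, and the use of
 * desc.retval for the VF id are assumptions about this driver's admin
 * queue event layout; the cookie placement follows the virtchnl
 * convention of opcode in cookie_high and retval in cookie_low:
 *
 *      struct i40e_arq_event_info event;       // filled by the ARQ clean
 *
 *      i40e_vc_process_vf_msg(pf, le16_to_cpu(event.desc.retval),
 *                             le32_to_cpu(event.desc.cookie_high),
 *                             le32_to_cpu(event.desc.cookie_low),
 *                             event.msg_buf, event.msg_size);
 *
 * Only the payload lives in the message buffer; the opcode and return
 * value travel in the descriptor cookies.
 */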

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
        u32 reg, reg_idx, bit_idx, vf_id;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf;

        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
                return 0;

        clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                /* read GLGEN_VFLRSTAT to find the VFs that experienced
                 * a function-level reset
                 */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
                if (reg & (1 << bit_idx)) {
                        /* clear the bit in GLGEN_VFLRSTAT */
                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

                        if (i40e_reset_vf(vf, true))
                                dev_err(&pf->pdev->dev,
                                        "Unable to reset the VF %d\n", vf_id);
                        /* free up vf resources to destroy vsi state */
                        i40e_free_vf_res(vf);

                        /* allocate new vf resources with the default state */
                        if (i40e_alloc_vf_res(vf))
                                dev_err(&pf->pdev->dev,
                                        "Unable to allocate VF resources %d\n",
                                        vf_id);

                        i40e_enable_vf_mappings(vf);
                }
        }

        /* re-enable vflr interrupt cause */
        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
        i40e_flush(hw);

        return 0;
}
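
/*
 * The GLGEN_VFLRSTAT lookup above packs one status bit per absolute VF
 * into 32-bit registers. A worked example, assuming vf_base_id = 64 and
 * vf_id = 5 (values chosen purely for illustration):
 *
 *      abs_vf_id = 64 + 5;             // 69
 *      reg_idx   = abs_vf_id / 32;     // register 2
 *      bit_idx   = abs_vf_id % 32;     // bit 5
 *
 * Writing that same bit back to I40E_GLGEN_VFLRSTAT(reg_idx) acks the
 * event, so a later FLR on the same VF can latch a fresh indication.
 */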

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
                                 enum i40e_virtchnl_ops v_opcode,
                                 i40e_status v_retval, u8 *msg,
                                 u16 msglen)
{
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
        int i;

        for (i = 0; i < pf->num_alloc_vfs; i++) {
                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
                i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
                                       msg, msglen, NULL);
                vf++;
        }
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
        struct i40e_virtchnl_pf_event pfe;

        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
        pfe.event_data.link_event.link_status =
            pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
        pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

        i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
                             (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
        struct i40e_virtchnl_pf_event pfe;

        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
        i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
                             (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
        struct i40e_virtchnl_pf_event pfe;

        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
        i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
                               I40E_SUCCESS, (u8 *)&pfe,
                               sizeof(struct i40e_virtchnl_pf_event), NULL);
}
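
/*
 * A minimal sketch of the intended notify-then-reset ordering, so the VF
 * driver sees RESET_IMPENDING and can quiesce before its registers go
 * away. The exact call site varies; this sequence is illustrative, not a
 * quote of a specific caller:
 *
 *      i40e_vc_notify_vf_reset(vf);
 *      if (i40e_reset_vf(vf, false))
 *              dev_err(&pf->pdev->dev, "reset of VF %d failed\n",
 *                      vf->vf_id);
 */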

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_mac_filter *f;
        struct i40e_vf *vf;
        int ret = 0;

        /* validate the request */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(&pf->pdev->dev,
                        "Invalid VF Identifier %d\n", vf_id);
                ret = -EINVAL;
                goto error_param;
        }

        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_index];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev,
                        "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
                goto error_param;
        }

        if (!is_valid_ether_addr(mac)) {
                dev_err(&pf->pdev->dev,
                        "Invalid VF ethernet address\n");
                ret = -EINVAL;
                goto error_param;
        }

        /* delete the temporary mac address */
        i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

        /* add the new mac address */
        f = i40e_add_filter(vsi, mac, 0, true, false);
        if (!f) {
                dev_err(&pf->pdev->dev,
                        "Unable to add VF ucast filter\n");
                ret = -ENOMEM;
                goto error_param;
        }

        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
        /* program mac filter */
        if (i40e_sync_vsi_filters(vsi)) {
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
                ret = -EIO;
                goto error_param;
        }
        memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
        dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
        ret = 0;

error_param:
        return ret;
}
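
/*
 * This handler backs the standard iproute2 SR-IOV control path, so setting
 * a VF MAC from the host looks like the following (the PF netdev name and
 * address are illustrative):
 *
 *      ip link set eth0 vf 0 mac 52:54:00:12:34:56
 *
 * As the message above notes, the VF driver must be reloaded before the
 * new address takes effect on the wire.
 */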

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan id to be set on the vf
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                              int vf_id, u16 vlan_id, u8 qos)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_vsi *vsi;
        struct i40e_vf *vf;
        int ret = 0;

        /* validate the request */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
                ret = -EINVAL;
                goto error_pvid;
        }

        if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
                dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
                ret = -EINVAL;
                goto error_pvid;
        }

        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_index];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
                goto error_pvid;
        }

        if (vsi->info.pvid) {
                /* kill old VLAN */
                ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
                                               VLAN_VID_MASK));
                if (ret) {
                        dev_info(&vsi->back->pdev->dev,
                                 "remove VLAN failed, ret=%d, aq_err=%d\n",
                                 ret, pf->hw.aq.asq_last_status);
                }
        }
        if (vlan_id || qos)
                ret = i40e_vsi_add_pvid(vsi,
                                vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
        else
                i40e_vlan_stripping_disable(vsi);

        if (vlan_id) {
                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
                         vlan_id, qos, vf_id);

                /* add new VLAN filter */
                ret = i40e_vsi_add_vlan(vsi, vlan_id);
                if (ret) {
                        dev_info(&vsi->back->pdev->dev,
                                 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
                                 vsi->back->hw.aq.asq_last_status);
                        goto error_pvid;
                }
        }

        if (ret) {
                dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
                goto error_pvid;
        }
        ret = 0;

error_pvid:
        return ret;
}
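
/*
 * Matching iproute2 usage for this handler, plus the PVID encoding it
 * programs. The interface name and values are illustrative, and the
 * arithmetic assumes I40E_VLAN_PRIORITY_SHIFT is 13, the usual 802.1Q
 * PCP bit position:
 *
 *      ip link set eth0 vf 0 vlan 100 qos 3
 *
 *      pvid = 100 | (3 << I40E_VLAN_PRIORITY_SHIFT);   // 0x6064
 *
 * Passing "vlan 0 qos 0" takes the else branch above and turns port
 * VLAN handling back off.
 */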

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
        return -EOPNOTSUPP;
}
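
/*
 * With this stub in place, a host-side rate request such as the following
 * (interface name and rate illustrative) fails with -EOPNOTSUPP, which
 * iproute2 reports as "Operation not supported"; per-VF Tx rate limiting
 * is simply not wired up in this version of the driver:
 *
 *      ip link set eth0 vf 0 rate 1000
 */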

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
                           int vf_id, struct ifla_vf_info *ivi)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_mac_filter *f, *ftmp;
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_vf *vf;
        int ret = 0;

        /* validate the request */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
                ret = -EINVAL;
                goto error_param;
        }

        vf = &(pf->vf[vf_id]);
        /* first vsi is always the LAN vsi */
        vsi = pf->vsi[vf->lan_vsi_index];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
                goto error_param;
        }

        ivi->vf = vf_id;

        /* first entry of the list is the default ethernet address */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
                break;
        }

        ivi->tx_rate = 0;
        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
                   I40E_VLAN_PRIORITY_SHIFT;
        ret = 0;

error_param:
        return ret;
}
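
/*
 * The fields filled in above are what "ip link show" prints per VF. A
 * sketch of the host-side view (PF name, address, and values illustrative,
 * output abbreviated):
 *
 *      ip link show eth0
 *          vf 0 MAC 52:54:00:12:34:56, vlan 100, qos 3
 *
 * tx_rate is always reported as 0 here because i40e_ndo_set_vf_bw() is
 * not implemented in this version of the driver.
 */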