blob: ed25fcbb09049ceecc937132897b7a528666fbab [file] [log] [blame]
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e.h"
29
30/***********************misc routines*****************************/
31
32/**
33 * i40e_vc_isvalid_vsi_id
34 * @vf: pointer to the vf info
35 * @vsi_id: vf relative vsi id
36 *
37 * check for the valid vsi id
38 **/
39static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
40{
41 struct i40e_pf *pf = vf->pf;
42
43 return pf->vsi[vsi_id]->vf_id == vf->vf_id;
44}
45
46/**
47 * i40e_vc_isvalid_queue_id
48 * @vf: pointer to the vf info
49 * @vsi_id: vsi id
50 * @qid: vsi relative queue id
51 *
52 * check for the valid queue id
53 **/
54static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
55 u8 qid)
56{
57 struct i40e_pf *pf = vf->pf;
58
59 return qid < pf->vsi[vsi_id]->num_queue_pairs;
60}
61
62/**
63 * i40e_vc_isvalid_vector_id
64 * @vf: pointer to the vf info
65 * @vector_id: vf relative vector id
66 *
67 * check for the valid vector id
68 **/
69static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
70{
71 struct i40e_pf *pf = vf->pf;
72
Mitch Williams54692b42013-11-16 10:00:38 +000073 return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +000074}
75
76/***********************vf resource mgmt routines*****************/
77
78/**
79 * i40e_vc_get_pf_queue_id
80 * @vf: pointer to the vf info
81 * @vsi_idx: index of VSI in PF struct
82 * @vsi_queue_id: vsi relative queue id
83 *
84 * return pf relative queue id
85 **/
86static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
87 u8 vsi_queue_id)
88{
89 struct i40e_pf *pf = vf->pf;
90 struct i40e_vsi *vsi = pf->vsi[vsi_idx];
91 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
92
93 if (le16_to_cpu(vsi->info.mapping_flags) &
94 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
95 pf_queue_id =
96 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
97 else
98 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
99 vsi_queue_id;
100
101 return pf_queue_id;
102}
103
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * Program the hardware interrupt linked list for one VF vector: the
 * LNKLST0/LNKLSTN head register points at the first mapped queue, and
 * each queue's RQCTL/TQCTL register chains on to the next queue that is
 * mapped to this vector.
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head: vector 0 has a dedicated register; the other
	 * vectors index an array laid out num_msix_vectors_vf per VF
	 */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     (pf->hw.func_caps.num_msix_vectors_vf
		      * vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}

	/* fold rx and tx maps into one bitmap in hardware walk order:
	 * rx queue n occupies bit 2n, tx queue n occupies bit 2n+1
	 * (I40E_VIRTCHNL_SUPPORTED_QTYPES slots per queue pair)
	 */
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	/* decode the first set bit back into queue id + queue type and
	 * write it into the list head register
	 */
	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	/* walk the remaining bits, chaining each queue's cause-control
	 * register to the following queue in the list
	 */
	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			/* no successor: terminate the linked list */
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
202
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * Program one VF tx queue's HMC context from the virtchnl request and
 * associate the queue with the VF's PCI function.
 *
 * Returns 0 on success, -ENOENT if the HMC context cannot be written.
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128; /* base is in 128-byte units */
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
267
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * Validate the VF-supplied rx queue parameters and program the queue's
 * HMC context.
 *
 * Returns 0 on success, -EINVAL for out-of-range buffer/packet sizes,
 * -ENOENT if the HMC context cannot be written.
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128; /* base is in 128-byte units */
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation: at most 2KB minus 64 bytes */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation: at most 16KB minus 128 bytes */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation: 64 bytes up to just under 16KB */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
360
361/**
362 * i40e_alloc_vsi_res
363 * @vf: pointer to the vf info
364 * @type: type of VSI to allocate
365 *
366 * alloc vf vsi context & resources
367 **/
368static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
369{
370 struct i40e_mac_filter *f = NULL;
371 struct i40e_pf *pf = vf->pf;
372 struct i40e_hw *hw = &pf->hw;
373 struct i40e_vsi *vsi;
374 int ret = 0;
375
376 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
377
378 if (!vsi) {
379 dev_err(&pf->pdev->dev,
380 "add vsi failed for vf %d, aq_err %d\n",
381 vf->vf_id, pf->hw.aq.asq_last_status);
382 ret = -ENOENT;
383 goto error_alloc_vsi_res;
384 }
385 if (type == I40E_VSI_SRIOV) {
386 vf->lan_vsi_index = vsi->idx;
387 vf->lan_vsi_id = vsi->id;
388 dev_info(&pf->pdev->dev,
389 "LAN VSI index %d, VSI id %d\n",
390 vsi->idx, vsi->id);
391 f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
392 0, true, false);
393 }
Neerav Parikh6dbbbfb2013-11-26 10:49:24 +0000394
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000395 if (!f) {
396 dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
397 ret = -ENOMEM;
398 goto error_alloc_vsi_res;
399 }
400
401 /* program mac filter */
402 ret = i40e_sync_vsi_filters(vsi);
403 if (ret) {
404 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
405 goto error_alloc_vsi_res;
406 }
407
408 /* accept bcast pkts. by default */
409 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
410 if (ret) {
411 dev_err(&pf->pdev->dev,
412 "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
413 vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
414 ret = -EINVAL;
415 }
416
417error_alloc_vsi_res:
418 return ret;
419}
420
421/**
Mitch Williams805bd5b2013-11-28 06:39:26 +0000422 * i40e_enable_vf_mappings
423 * @vf: pointer to the vf info
424 *
425 * enable vf mappings
426 **/
427static void i40e_enable_vf_mappings(struct i40e_vf *vf)
428{
429 struct i40e_pf *pf = vf->pf;
430 struct i40e_hw *hw = &pf->hw;
431 u32 reg, total_queue_pairs = 0;
432 int j;
433
434 /* Tell the hardware we're using noncontiguous mapping. HW requires
435 * that VF queues be mapped using this method, even when they are
436 * contiguous in real life
437 */
438 wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
439 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
440
441 /* enable VF vplan_qtable mappings */
442 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
443 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
444
445 /* map PF queues to VF queues */
446 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
447 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
448 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
449 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
450 total_queue_pairs++;
451 }
452
453 /* map PF queues to VSI */
454 for (j = 0; j < 7; j++) {
455 if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
456 reg = 0x07FF07FF; /* unused */
457 } else {
458 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
459 j * 2);
460 reg = qid;
461 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
462 (j * 2) + 1);
463 reg |= qid << 16;
464 }
465 wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
466 }
467
468 i40e_flush(hw);
469}
470
471/**
472 * i40e_disable_vf_mappings
473 * @vf: pointer to the vf info
474 *
475 * disable vf mappings
476 **/
477static void i40e_disable_vf_mappings(struct i40e_vf *vf)
478{
479 struct i40e_pf *pf = vf->pf;
480 struct i40e_hw *hw = &pf->hw;
481 int i;
482
483 /* disable qp mappings */
484 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
485 for (i = 0; i < I40E_MAX_VSI_QP; i++)
486 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
487 I40E_QUEUE_END_OF_LIST);
488 i40e_flush(hw);
489}
490
/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * Release the VF's VSI and put its interrupt registers back into a known
 * quiesced state so the next user of the VF starts clean.
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	/* +1 covers vector 0 (which has its own CTL0/LNKLST0 registers)
	 * on top of the per-VF data vectors
	 */
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						    + (i - 1));
		/* all-ones FIRSTQ fields mark the list head as empty */
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
544
545/**
546 * i40e_alloc_vf_res
547 * @vf: pointer to the vf info
548 *
549 * allocate vf resources
550 **/
551static int i40e_alloc_vf_res(struct i40e_vf *vf)
552{
553 struct i40e_pf *pf = vf->pf;
554 int total_queue_pairs = 0;
555 int ret;
556
557 /* allocate hw vsi context & associated resources */
558 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
559 if (ret)
560 goto error_alloc;
561 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
562 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
563
564 /* store the total qps number for the runtime
565 * vf req validation
566 */
567 vf->num_queue_pairs = total_queue_pairs;
568
569 /* vf is now completely initialized */
570 set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
571
572error_alloc:
573 if (ret)
574 i40e_free_vf_res(vf);
575
576 return ret;
577}
578
Mitch Williamsfc18eaa2013-11-28 06:39:27 +0000579#define VF_DEVICE_STATUS 0xAA
580#define VF_TRANS_PENDING_MASK 0x20
581/**
582 * i40e_quiesce_vf_pci
583 * @vf: pointer to the vf structure
584 *
585 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
586 * if the transactions never clear.
587 **/
588static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
589{
590 struct i40e_pf *pf = vf->pf;
591 struct i40e_hw *hw = &pf->hw;
592 int vf_abs_id, i;
593 u32 reg;
594
595 reg = rd32(hw, I40E_PF_VT_PFALLOC);
596 vf_abs_id = vf->vf_id + (reg & I40E_PF_VT_PFALLOC_FIRSTVF_MASK);
597
598 wr32(hw, I40E_PF_PCI_CIAA,
599 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
600 for (i = 0; i < 100; i++) {
601 reg = rd32(hw, I40E_PF_PCI_CIAD);
602 if ((reg & VF_TRANS_PENDING_MASK) == 0)
603 return 0;
604 udelay(1);
605 }
606 return -EIO;
607}
608
/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf: trigger the reset (unless the HW already did via VFLR),
 * wait for the hardware to report completion, stop the rings, then
 * rebuild the VF's resources and signal the VF that it may continue.
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	/* the reset is not complete until the VF's PCI transactions drain */
	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	/* report reset completion to the VF even on timeout */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
	mdelay(10);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}
684
685/**
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000686 * i40e_vfs_are_assigned
687 * @pf: pointer to the pf structure
688 *
689 * Determine if any VFs are assigned to VMs
690 **/
691static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
692{
693 struct pci_dev *pdev = pf->pdev;
694 struct pci_dev *vfdev;
695
696 /* loop through all the VFs to see if we own any that are assigned */
697 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
698 while (vfdev) {
699 /* if we don't own it we don't care */
700 if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
701 /* if it is assigned we cannot release it */
702 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
703 return true;
704 }
705
706 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
707 I40E_VF_DEVICE_ID,
708 vfdev);
709 }
710
711 return false;
712}
713
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * Tear down all per-VF state and, when no VF is passed through to a VM,
 * turn SR-IOV off again.
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i, tmp;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);
	mdelay(10); /* let any messages in transit get finished up */
	/* free up vf resources; zero the count first so the VF array is
	 * not observed as valid while it is being torn down
	 */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* an assigned VF's PCI space must stay alive, so SR-IOV can only
	 * be disabled once no VF is passed through to a VM
	 */
	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}
758
759#ifdef CONFIG_PCI_IOV
760/**
761 * i40e_alloc_vfs
762 * @pf: pointer to the pf structure
763 * @num_alloc_vfs: number of vfs to allocate
764 *
765 * allocate vf resources
766 **/
767static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
768{
Mitch Williams6c1b5bf2013-11-28 06:39:30 +0000769 struct i40e_hw *hw = &pf->hw;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000770 struct i40e_vf *vfs;
771 int i, ret = 0;
772
Mitch Williams6c1b5bf2013-11-28 06:39:30 +0000773 /* Disable interrupt 0 so we don't try to handle the VFLR. */
774 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
775 i40e_flush(hw);
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000776 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
777 if (ret) {
778 dev_err(&pf->pdev->dev,
779 "pci_enable_sriov failed with error %d!\n", ret);
780 pf->num_alloc_vfs = 0;
781 goto err_iov;
782 }
783
784 /* allocate memory */
785 vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
786 if (!vfs) {
787 ret = -ENOMEM;
788 goto err_alloc;
789 }
790
791 /* apply default profile */
792 for (i = 0; i < num_alloc_vfs; i++) {
793 vfs[i].pf = pf;
794 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
795 vfs[i].vf_id = i;
796
797 /* assign default capabilities */
798 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
Mitch Williamsfc18eaa2013-11-28 06:39:27 +0000799 /* vf resources get allocated during reset */
800 i40e_reset_vf(&vfs[i], false);
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000801
802 /* enable vf vplan_qtable mappings */
803 i40e_enable_vf_mappings(&vfs[i]);
804 }
805 pf->vf = vfs;
806 pf->num_alloc_vfs = num_alloc_vfs;
807
808err_alloc:
809 if (ret)
810 i40e_free_vfs(pf);
811err_iov:
Mitch Williams6c1b5bf2013-11-28 06:39:30 +0000812 /* Re-enable interrupt 0. */
813 wr32(hw, I40E_PFINT_DYN_CTL0,
814 I40E_PFINT_DYN_CTL0_INTENA_MASK |
815 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
816 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000817 return ret;
818}
819
820#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 *
 * Returns num_vfs on success, a negative errno on failure; always 0 when
 * the driver is built without CONFIG_PCI_IOV.
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	/* a different VF count than is currently enabled means tear down
	 * and rebuild; the same count is a no-op
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	/* cap the request at pf->num_req_vfs (presumably the count sized
	 * at probe time — confirm against the pf setup code)
	 */
	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	/* SR-IOV support compiled out */
	return 0;
}
860
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the
 * number of VFs in sysfs; a count of zero disables SR-IOV entirely.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (!num_vfs) {
		i40e_free_vfs(pf);
		return 0;
	}

	return i40e_pci_sriov_enable(pdev, num_vfs);
}
879
880/***********************virtual channel routines******************/
881
882/**
883 * i40e_vc_send_msg_to_vf
884 * @vf: pointer to the vf info
885 * @v_opcode: virtual channel opcode
886 * @v_retval: virtual channel return value
887 * @msg: pointer to the msg buffer
888 * @msglen: msg length
889 *
890 * send msg to vf
891 **/
892static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
893 u32 v_retval, u8 *msg, u16 msglen)
894{
895 struct i40e_pf *pf = vf->pf;
896 struct i40e_hw *hw = &pf->hw;
897 i40e_status aq_ret;
898
899 /* single place to detect unsuccessful return values */
900 if (v_retval) {
901 vf->num_invalid_msgs++;
902 dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
903 v_opcode, v_retval);
904 if (vf->num_invalid_msgs >
905 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
906 dev_err(&pf->pdev->dev,
907 "Number of invalid messages exceeded for VF %d\n",
908 vf->vf_id);
909 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
910 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
911 }
912 } else {
913 vf->num_valid_msgs++;
914 }
915
916 aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
917 msg, msglen, NULL);
918 if (aq_ret) {
919 dev_err(&pf->pdev->dev,
920 "Unable to send the message to VF %d aq_err %d\n",
921 vf->vf_id, pf->hw.aq.asq_last_status);
922 return -EIO;
923 }
924
925 return 0;
926}
927
928/**
929 * i40e_vc_send_resp_to_vf
930 * @vf: pointer to the vf info
931 * @opcode: operation code
932 * @retval: return value
933 *
934 * send resp msg to vf
935 **/
936static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
937 enum i40e_virtchnl_ops opcode,
938 i40e_status retval)
939{
940 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
941}
942
943/**
944 * i40e_vc_get_version_msg
945 * @vf: pointer to the vf info
946 *
947 * called from the vf to request the API version used by the PF
948 **/
949static int i40e_vc_get_version_msg(struct i40e_vf *vf)
950{
951 struct i40e_virtchnl_version_info info = {
952 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
953 };
954
955 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
956 I40E_SUCCESS, (u8 *)&info,
957 sizeof(struct
958 i40e_virtchnl_version_info));
959}
960
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources; builds and sends back an
 * i40e_virtchnl_vf_resource describing the VF's single LAN VSI.
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	/* only an initialized VF may request its resources */
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;	/* reply below is sent with an empty body */
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	/* VLAN offload is only offered when no port VLAN (pvid) is set */
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
1021
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	/* only reset a VF that has finished bringing up its resources;
	 * 'false' requests a VF-level (not PF-triggered flr) reset
	 */
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
1037
1038/**
1039 * i40e_vc_config_promiscuous_mode_msg
1040 * @vf: pointer to the vf info
1041 * @msg: pointer to the msg buffer
1042 * @msglen: msg length
1043 *
1044 * called from the vf to configure the promiscuous mode of
1045 * vf vsis
1046 **/
1047static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1048 u8 *msg, u16 msglen)
1049{
1050 struct i40e_virtchnl_promisc_info *info =
1051 (struct i40e_virtchnl_promisc_info *)msg;
1052 struct i40e_pf *pf = vf->pf;
1053 struct i40e_hw *hw = &pf->hw;
1054 bool allmulti = false;
1055 bool promisc = false;
1056 i40e_status aq_ret;
1057
1058 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1059 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1060 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1061 (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
1062 aq_ret = I40E_ERR_PARAM;
1063 goto error_param;
1064 }
1065
1066 if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
1067 promisc = true;
1068 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
1069 promisc, NULL);
1070 if (aq_ret)
1071 goto error_param;
1072
1073 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1074 allmulti = true;
1075 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
1076 allmulti, NULL);
1077
1078error_param:
1079 /* send the response to the vf */
1080 return i40e_vc_send_resp_to_vf(vf,
1081 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1082 aq_ret);
1083}
1084
1085/**
1086 * i40e_vc_config_queues_msg
1087 * @vf: pointer to the vf info
1088 * @msg: pointer to the msg buffer
1089 * @msglen: msg length
1090 *
1091 * called from the vf to configure the rx/tx
1092 * queues
1093 **/
1094static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1095{
1096 struct i40e_virtchnl_vsi_queue_config_info *qci =
1097 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1098 struct i40e_virtchnl_queue_pair_info *qpi;
1099 u16 vsi_id, vsi_queue_id;
1100 i40e_status aq_ret = 0;
1101 int i;
1102
1103 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1104 aq_ret = I40E_ERR_PARAM;
1105 goto error_param;
1106 }
1107
1108 vsi_id = qci->vsi_id;
1109 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1110 aq_ret = I40E_ERR_PARAM;
1111 goto error_param;
1112 }
1113 for (i = 0; i < qci->num_queue_pairs; i++) {
1114 qpi = &qci->qpair[i];
1115 vsi_queue_id = qpi->txq.queue_id;
1116 if ((qpi->txq.vsi_id != vsi_id) ||
1117 (qpi->rxq.vsi_id != vsi_id) ||
1118 (qpi->rxq.queue_id != vsi_queue_id) ||
1119 !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1120 aq_ret = I40E_ERR_PARAM;
1121 goto error_param;
1122 }
1123
1124 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1125 &qpi->rxq) ||
1126 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1127 &qpi->txq)) {
1128 aq_ret = I40E_ERR_PARAM;
1129 goto error_param;
1130 }
1131 }
1132
1133error_param:
1134 /* send the response to the vf */
1135 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1136 aq_ret);
1137}
1138
1139/**
1140 * i40e_vc_config_irq_map_msg
1141 * @vf: pointer to the vf info
1142 * @msg: pointer to the msg buffer
1143 * @msglen: msg length
1144 *
1145 * called from the vf to configure the irq to
1146 * queue map
1147 **/
1148static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1149{
1150 struct i40e_virtchnl_irq_map_info *irqmap_info =
1151 (struct i40e_virtchnl_irq_map_info *)msg;
1152 struct i40e_virtchnl_vector_map *map;
1153 u16 vsi_id, vsi_queue_id, vector_id;
1154 i40e_status aq_ret = 0;
1155 unsigned long tempmap;
1156 int i;
1157
1158 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1159 aq_ret = I40E_ERR_PARAM;
1160 goto error_param;
1161 }
1162
1163 for (i = 0; i < irqmap_info->num_vectors; i++) {
1164 map = &irqmap_info->vecmap[i];
1165
1166 vector_id = map->vector_id;
1167 vsi_id = map->vsi_id;
1168 /* validate msg params */
1169 if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1170 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1171 aq_ret = I40E_ERR_PARAM;
1172 goto error_param;
1173 }
1174
1175 /* lookout for the invalid queue index */
1176 tempmap = map->rxq_map;
Wei Yongjun48366502013-09-24 05:17:36 +00001177 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001178 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1179 vsi_queue_id)) {
1180 aq_ret = I40E_ERR_PARAM;
1181 goto error_param;
1182 }
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001183 }
1184
1185 tempmap = map->txq_map;
Wei Yongjun48366502013-09-24 05:17:36 +00001186 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001187 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1188 vsi_queue_id)) {
1189 aq_ret = I40E_ERR_PARAM;
1190 goto error_param;
1191 }
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001192 }
1193
1194 i40e_config_irq_link_list(vf, vsi_id, map);
1195 }
1196error_param:
1197 /* send the response to the vf */
1198 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
1199 aq_ret);
1200}
1201
1202/**
1203 * i40e_vc_enable_queues_msg
1204 * @vf: pointer to the vf info
1205 * @msg: pointer to the msg buffer
1206 * @msglen: msg length
1207 *
1208 * called from the vf to enable all or specific queue(s)
1209 **/
1210static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1211{
1212 struct i40e_virtchnl_queue_select *vqs =
1213 (struct i40e_virtchnl_queue_select *)msg;
1214 struct i40e_pf *pf = vf->pf;
1215 u16 vsi_id = vqs->vsi_id;
1216 i40e_status aq_ret = 0;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001217
1218 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1219 aq_ret = I40E_ERR_PARAM;
1220 goto error_param;
1221 }
1222
1223 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1224 aq_ret = I40E_ERR_PARAM;
1225 goto error_param;
1226 }
1227
1228 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1229 aq_ret = I40E_ERR_PARAM;
1230 goto error_param;
1231 }
Mitch Williams88f65632013-11-28 06:39:28 +00001232 if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
1233 aq_ret = I40E_ERR_TIMEOUT;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001234error_param:
1235 /* send the response to the vf */
1236 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1237 aq_ret);
1238}
1239
1240/**
1241 * i40e_vc_disable_queues_msg
1242 * @vf: pointer to the vf info
1243 * @msg: pointer to the msg buffer
1244 * @msglen: msg length
1245 *
1246 * called from the vf to disable all or specific
1247 * queue(s)
1248 **/
1249static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1250{
1251 struct i40e_virtchnl_queue_select *vqs =
1252 (struct i40e_virtchnl_queue_select *)msg;
1253 struct i40e_pf *pf = vf->pf;
1254 u16 vsi_id = vqs->vsi_id;
1255 i40e_status aq_ret = 0;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001256
1257 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1258 aq_ret = I40E_ERR_PARAM;
1259 goto error_param;
1260 }
1261
1262 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1263 aq_ret = I40E_ERR_PARAM;
1264 goto error_param;
1265 }
1266
1267 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1268 aq_ret = I40E_ERR_PARAM;
1269 goto error_param;
1270 }
Mitch Williams88f65632013-11-28 06:39:28 +00001271 if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
1272 aq_ret = I40E_ERR_TIMEOUT;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001273
1274error_param:
1275 /* send the response to the vf */
1276 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1277 aq_ret);
1278}
1279
1280/**
1281 * i40e_vc_get_stats_msg
1282 * @vf: pointer to the vf info
1283 * @msg: pointer to the msg buffer
1284 * @msglen: msg length
1285 *
1286 * called from the vf to get vsi stats
1287 **/
1288static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1289{
1290 struct i40e_virtchnl_queue_select *vqs =
1291 (struct i40e_virtchnl_queue_select *)msg;
1292 struct i40e_pf *pf = vf->pf;
1293 struct i40e_eth_stats stats;
1294 i40e_status aq_ret = 0;
1295 struct i40e_vsi *vsi;
1296
1297 memset(&stats, 0, sizeof(struct i40e_eth_stats));
1298
1299 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1300 aq_ret = I40E_ERR_PARAM;
1301 goto error_param;
1302 }
1303
1304 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1305 aq_ret = I40E_ERR_PARAM;
1306 goto error_param;
1307 }
1308
1309 vsi = pf->vsi[vqs->vsi_id];
1310 if (!vsi) {
1311 aq_ret = I40E_ERR_PARAM;
1312 goto error_param;
1313 }
1314 i40e_update_eth_stats(vsi);
1315 memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
1316
1317error_param:
1318 /* send the response back to the vf */
1319 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1320 (u8 *)&stats, sizeof(stats));
1321}
1322
1323/**
1324 * i40e_vc_add_mac_addr_msg
1325 * @vf: pointer to the vf info
1326 * @msg: pointer to the msg buffer
1327 * @msglen: msg length
1328 *
1329 * add guest mac address filter
1330 **/
1331static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1332{
1333 struct i40e_virtchnl_ether_addr_list *al =
1334 (struct i40e_virtchnl_ether_addr_list *)msg;
1335 struct i40e_pf *pf = vf->pf;
1336 struct i40e_vsi *vsi = NULL;
1337 u16 vsi_id = al->vsi_id;
1338 i40e_status aq_ret = 0;
1339 int i;
1340
1341 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1342 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1343 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1344 aq_ret = I40E_ERR_PARAM;
1345 goto error_param;
1346 }
1347
1348 for (i = 0; i < al->num_elements; i++) {
1349 if (is_broadcast_ether_addr(al->list[i].addr) ||
1350 is_zero_ether_addr(al->list[i].addr)) {
1351 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
1352 al->list[i].addr);
Mitch Williamsadaf3562013-11-28 06:39:30 +00001353 aq_ret = I40E_ERR_INVALID_MAC_ADDR;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001354 goto error_param;
1355 }
1356 }
1357 vsi = pf->vsi[vsi_id];
1358
1359 /* add new addresses to the list */
1360 for (i = 0; i < al->num_elements; i++) {
1361 struct i40e_mac_filter *f;
1362
1363 f = i40e_find_mac(vsi, al->list[i].addr, true, false);
Mitch Williams7e68edf92013-11-16 10:00:41 +00001364 if (!f) {
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001365 if (i40e_is_vsi_in_vlan(vsi))
1366 f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1367 true, false);
1368 else
1369 f = i40e_add_filter(vsi, al->list[i].addr, -1,
1370 true, false);
1371 }
1372
1373 if (!f) {
1374 dev_err(&pf->pdev->dev,
1375 "Unable to add VF MAC filter\n");
1376 aq_ret = I40E_ERR_PARAM;
1377 goto error_param;
1378 }
1379 }
1380
1381 /* program the updated filter list */
1382 if (i40e_sync_vsi_filters(vsi))
1383 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1384
1385error_param:
1386 /* send the response to the vf */
1387 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1388 aq_ret);
1389}
1390
1391/**
1392 * i40e_vc_del_mac_addr_msg
1393 * @vf: pointer to the vf info
1394 * @msg: pointer to the msg buffer
1395 * @msglen: msg length
1396 *
1397 * remove guest mac address filter
1398 **/
1399static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1400{
1401 struct i40e_virtchnl_ether_addr_list *al =
1402 (struct i40e_virtchnl_ether_addr_list *)msg;
1403 struct i40e_pf *pf = vf->pf;
1404 struct i40e_vsi *vsi = NULL;
1405 u16 vsi_id = al->vsi_id;
1406 i40e_status aq_ret = 0;
1407 int i;
1408
1409 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1410 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1411 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1412 aq_ret = I40E_ERR_PARAM;
1413 goto error_param;
1414 }
1415 vsi = pf->vsi[vsi_id];
1416
1417 /* delete addresses from the list */
1418 for (i = 0; i < al->num_elements; i++)
1419 i40e_del_filter(vsi, al->list[i].addr,
1420 I40E_VLAN_ANY, true, false);
1421
1422 /* program the updated filter list */
1423 if (i40e_sync_vsi_filters(vsi))
1424 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1425
1426error_param:
1427 /* send the response to the vf */
1428 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1429 aq_ret);
1430}
1431
1432/**
1433 * i40e_vc_add_vlan_msg
1434 * @vf: pointer to the vf info
1435 * @msg: pointer to the msg buffer
1436 * @msglen: msg length
1437 *
1438 * program guest vlan id
1439 **/
1440static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1441{
1442 struct i40e_virtchnl_vlan_filter_list *vfl =
1443 (struct i40e_virtchnl_vlan_filter_list *)msg;
1444 struct i40e_pf *pf = vf->pf;
1445 struct i40e_vsi *vsi = NULL;
1446 u16 vsi_id = vfl->vsi_id;
1447 i40e_status aq_ret = 0;
1448 int i;
1449
1450 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1451 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1452 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1453 aq_ret = I40E_ERR_PARAM;
1454 goto error_param;
1455 }
1456
1457 for (i = 0; i < vfl->num_elements; i++) {
1458 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1459 aq_ret = I40E_ERR_PARAM;
1460 dev_err(&pf->pdev->dev,
1461 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
1462 goto error_param;
1463 }
1464 }
1465 vsi = pf->vsi[vsi_id];
1466 if (vsi->info.pvid) {
1467 aq_ret = I40E_ERR_PARAM;
1468 goto error_param;
1469 }
1470
1471 i40e_vlan_stripping_enable(vsi);
1472 for (i = 0; i < vfl->num_elements; i++) {
1473 /* add new VLAN filter */
1474 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
1475 if (ret)
1476 dev_err(&pf->pdev->dev,
1477 "Unable to add VF vlan filter %d, error %d\n",
1478 vfl->vlan_id[i], ret);
1479 }
1480
1481error_param:
1482 /* send the response to the vf */
1483 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
1484}
1485
1486/**
1487 * i40e_vc_remove_vlan_msg
1488 * @vf: pointer to the vf info
1489 * @msg: pointer to the msg buffer
1490 * @msglen: msg length
1491 *
1492 * remove programmed guest vlan id
1493 **/
1494static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1495{
1496 struct i40e_virtchnl_vlan_filter_list *vfl =
1497 (struct i40e_virtchnl_vlan_filter_list *)msg;
1498 struct i40e_pf *pf = vf->pf;
1499 struct i40e_vsi *vsi = NULL;
1500 u16 vsi_id = vfl->vsi_id;
1501 i40e_status aq_ret = 0;
1502 int i;
1503
1504 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1505 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1506 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1507 aq_ret = I40E_ERR_PARAM;
1508 goto error_param;
1509 }
1510
1511 for (i = 0; i < vfl->num_elements; i++) {
1512 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1513 aq_ret = I40E_ERR_PARAM;
1514 goto error_param;
1515 }
1516 }
1517
1518 vsi = pf->vsi[vsi_id];
1519 if (vsi->info.pvid) {
1520 aq_ret = I40E_ERR_PARAM;
1521 goto error_param;
1522 }
1523
1524 for (i = 0; i < vfl->num_elements; i++) {
1525 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1526 if (ret)
1527 dev_err(&pf->pdev->dev,
1528 "Unable to delete VF vlan filter %d, error %d\n",
1529 vfl->vlan_id[i], ret);
1530 }
1531
1532error_param:
1533 /* send the response to the vf */
1534 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1535}
1536
1537/**
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001538 * i40e_vc_validate_vf_msg
1539 * @vf: pointer to the vf info
1540 * @msg: pointer to the msg buffer
1541 * @msglen: msg length
1542 * @msghndl: msg handle
1543 *
1544 * validate msg
1545 **/
1546static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1547 u32 v_retval, u8 *msg, u16 msglen)
1548{
1549 bool err_msg_format = false;
1550 int valid_len;
1551
1552 /* Check if VF is disabled. */
1553 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1554 return I40E_ERR_PARAM;
1555
1556 /* Validate message length. */
1557 switch (v_opcode) {
1558 case I40E_VIRTCHNL_OP_VERSION:
1559 valid_len = sizeof(struct i40e_virtchnl_version_info);
1560 break;
1561 case I40E_VIRTCHNL_OP_RESET_VF:
1562 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1563 valid_len = 0;
1564 break;
1565 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1566 valid_len = sizeof(struct i40e_virtchnl_txq_info);
1567 break;
1568 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1569 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1570 break;
1571 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1572 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
1573 if (msglen >= valid_len) {
1574 struct i40e_virtchnl_vsi_queue_config_info *vqc =
1575 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1576 valid_len += (vqc->num_queue_pairs *
1577 sizeof(struct
1578 i40e_virtchnl_queue_pair_info));
1579 if (vqc->num_queue_pairs == 0)
1580 err_msg_format = true;
1581 }
1582 break;
1583 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1584 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1585 if (msglen >= valid_len) {
1586 struct i40e_virtchnl_irq_map_info *vimi =
1587 (struct i40e_virtchnl_irq_map_info *)msg;
1588 valid_len += (vimi->num_vectors *
1589 sizeof(struct i40e_virtchnl_vector_map));
1590 if (vimi->num_vectors == 0)
1591 err_msg_format = true;
1592 }
1593 break;
1594 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1595 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1596 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1597 break;
1598 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1599 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1600 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1601 if (msglen >= valid_len) {
1602 struct i40e_virtchnl_ether_addr_list *veal =
1603 (struct i40e_virtchnl_ether_addr_list *)msg;
1604 valid_len += veal->num_elements *
1605 sizeof(struct i40e_virtchnl_ether_addr);
1606 if (veal->num_elements == 0)
1607 err_msg_format = true;
1608 }
1609 break;
1610 case I40E_VIRTCHNL_OP_ADD_VLAN:
1611 case I40E_VIRTCHNL_OP_DEL_VLAN:
1612 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1613 if (msglen >= valid_len) {
1614 struct i40e_virtchnl_vlan_filter_list *vfl =
1615 (struct i40e_virtchnl_vlan_filter_list *)msg;
1616 valid_len += vfl->num_elements * sizeof(u16);
1617 if (vfl->num_elements == 0)
1618 err_msg_format = true;
1619 }
1620 break;
1621 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1622 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1623 break;
1624 case I40E_VIRTCHNL_OP_GET_STATS:
1625 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1626 break;
1627 /* These are always errors coming from the VF. */
1628 case I40E_VIRTCHNL_OP_EVENT:
1629 case I40E_VIRTCHNL_OP_UNKNOWN:
1630 default:
1631 return -EPERM;
1632 break;
1633 }
1634 /* few more checks */
1635 if ((valid_len != msglen) || (err_msg_format)) {
1636 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1637 return -EINVAL;
1638 } else {
1639 return 0;
1640 }
1641}
1642
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtchnl opcode of the message
 * @v_retval: return value carried by the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret;

	/* count every request, including ones from out-of-range VF ids */
	pf->vf_aq_requests++;
	if (vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		/* the validator has already NAKed malformed messages */
		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
			vf_id, v_opcode, msglen);
		return ret;
	}
	/* a valid message proves the VF is up; reflect that in its
	 * reset status register
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	/* dispatch to the per-opcode handler; each handler sends its own
	 * response back to the VF (except RESET_VF, which never responds)
	 */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
1726
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vlfr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	/* ack the pending-event flag before scanning so a new VFLR that
	 * fires during the scan is not lost
	 */
	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		/* VFLR status is packed 32 VFs per register, indexed from
		 * this function's absolute VF base id
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			/* 'true' marks this as an flr-initiated reset */
			i40e_reset_vf(vf, true);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}
1766
1767/**
1768 * i40e_vc_vf_broadcast
1769 * @pf: pointer to the pf structure
1770 * @opcode: operation code
1771 * @retval: return value
1772 * @msg: pointer to the msg buffer
1773 * @msglen: msg length
1774 *
1775 * send a message to all VFs on a given PF
1776 **/
1777static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1778 enum i40e_virtchnl_ops v_opcode,
1779 i40e_status v_retval, u8 *msg,
1780 u16 msglen)
1781{
1782 struct i40e_hw *hw = &pf->hw;
1783 struct i40e_vf *vf = pf->vf;
1784 int i;
1785
1786 for (i = 0; i < pf->num_alloc_vfs; i++) {
1787 /* Ignore return value on purpose - a given VF may fail, but
1788 * we need to keep going and send to all of them
1789 */
1790 i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
1791 msg, msglen, NULL);
1792 vf++;
1793 }
1794}
1795
1796/**
1797 * i40e_vc_notify_link_state
1798 * @pf: pointer to the pf structure
1799 *
1800 * send a link status message to all VFs on a given PF
1801 **/
1802void i40e_vc_notify_link_state(struct i40e_pf *pf)
1803{
1804 struct i40e_virtchnl_pf_event pfe;
1805
1806 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1807 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1808 pfe.event_data.link_event.link_status =
1809 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
1810 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
1811
1812 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
1813 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
1814}
1815
1816/**
1817 * i40e_vc_notify_reset
1818 * @pf: pointer to the pf structure
1819 *
1820 * indicate a pending reset to all VFs on a given PF
1821 **/
1822void i40e_vc_notify_reset(struct i40e_pf *pf)
1823{
1824 struct i40e_virtchnl_pf_event pfe;
1825
1826 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
1827 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
1828 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
1829 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
1830}
1831
1832/**
1833 * i40e_vc_notify_vf_reset
1834 * @vf: pointer to the vf structure
1835 *
1836 * indicate a pending reset to the given VF
1837 **/
1838void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
1839{
1840 struct i40e_virtchnl_pf_event pfe;
1841
1842 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
1843 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
1844 i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
1845 I40E_SUCCESS, (u8 *)&pfe,
1846 sizeof(struct i40e_virtchnl_pf_event), NULL);
1847}
1848
1849/**
1850 * i40e_ndo_set_vf_mac
1851 * @netdev: network interface device structure
1852 * @vf_id: vf identifier
1853 * @mac: mac address
1854 *
1855 * program vf mac address
1856 **/
1857int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1858{
1859 struct i40e_netdev_priv *np = netdev_priv(netdev);
1860 struct i40e_vsi *vsi = np->vsi;
1861 struct i40e_pf *pf = vsi->back;
1862 struct i40e_mac_filter *f;
1863 struct i40e_vf *vf;
1864 int ret = 0;
1865
1866 /* validate the request */
1867 if (vf_id >= pf->num_alloc_vfs) {
1868 dev_err(&pf->pdev->dev,
1869 "Invalid VF Identifier %d\n", vf_id);
1870 ret = -EINVAL;
1871 goto error_param;
1872 }
1873
1874 vf = &(pf->vf[vf_id]);
1875 vsi = pf->vsi[vf->lan_vsi_index];
1876 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1877 dev_err(&pf->pdev->dev,
1878 "Uninitialized VF %d\n", vf_id);
1879 ret = -EINVAL;
1880 goto error_param;
1881 }
1882
1883 if (!is_valid_ether_addr(mac)) {
1884 dev_err(&pf->pdev->dev,
1885 "Invalid VF ethernet address\n");
1886 ret = -EINVAL;
1887 goto error_param;
1888 }
1889
1890 /* delete the temporary mac address */
1891 i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
1892
1893 /* add the new mac address */
1894 f = i40e_add_filter(vsi, mac, 0, true, false);
1895 if (!f) {
1896 dev_err(&pf->pdev->dev,
1897 "Unable to add VF ucast filter\n");
1898 ret = -ENOMEM;
1899 goto error_param;
1900 }
1901
1902 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
1903 /* program mac filter */
1904 if (i40e_sync_vsi_filters(vsi)) {
1905 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
1906 ret = -EIO;
1907 goto error_param;
1908 }
1909 memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
1910 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
1911 ret = 0;
1912
1913error_param:
1914 return ret;
1915}
1916
1917/**
1918 * i40e_ndo_set_vf_port_vlan
1919 * @netdev: network interface device structure
1920 * @vf_id: vf identifier
1921 * @vlan_id: mac address
1922 * @qos: priority setting
1923 *
1924 * program vf vlan id and/or qos
1925 **/
1926int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
1927 int vf_id, u16 vlan_id, u8 qos)
1928{
1929 struct i40e_netdev_priv *np = netdev_priv(netdev);
1930 struct i40e_pf *pf = np->vsi->back;
1931 struct i40e_vsi *vsi;
1932 struct i40e_vf *vf;
1933 int ret = 0;
1934
1935 /* validate the request */
1936 if (vf_id >= pf->num_alloc_vfs) {
1937 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
1938 ret = -EINVAL;
1939 goto error_pvid;
1940 }
1941
1942 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
1943 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
1944 ret = -EINVAL;
1945 goto error_pvid;
1946 }
1947
1948 vf = &(pf->vf[vf_id]);
1949 vsi = pf->vsi[vf->lan_vsi_index];
1950 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1951 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
1952 ret = -EINVAL;
1953 goto error_pvid;
1954 }
1955
1956 if (vsi->info.pvid) {
1957 /* kill old VLAN */
1958 ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
1959 VLAN_VID_MASK));
1960 if (ret) {
1961 dev_info(&vsi->back->pdev->dev,
1962 "remove VLAN failed, ret=%d, aq_err=%d\n",
1963 ret, pf->hw.aq.asq_last_status);
1964 }
1965 }
1966 if (vlan_id || qos)
1967 ret = i40e_vsi_add_pvid(vsi,
1968 vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
1969 else
1970 i40e_vlan_stripping_disable(vsi);
1971
1972 if (vlan_id) {
1973 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
1974 vlan_id, qos, vf_id);
1975
1976 /* add new VLAN filter */
1977 ret = i40e_vsi_add_vlan(vsi, vlan_id);
1978 if (ret) {
1979 dev_info(&vsi->back->pdev->dev,
1980 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
1981 vsi->back->hw.aq.asq_last_status);
1982 goto error_pvid;
1983 }
1984 }
1985
1986 if (ret) {
1987 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
1988 goto error_pvid;
1989 }
1990 ret = 0;
1991
1992error_pvid:
1993 return ret;
1994}
1995
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 *
 * Per-VF rate limiting is not implemented; always reports
 * -EOPNOTSUPP to the caller.
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}
2008
2009/**
2010 * i40e_ndo_get_vf_config
2011 * @netdev: network interface device structure
2012 * @vf_id: vf identifier
2013 * @ivi: vf configuration structure
2014 *
2015 * return vf configuration
2016 **/
2017int i40e_ndo_get_vf_config(struct net_device *netdev,
2018 int vf_id, struct ifla_vf_info *ivi)
2019{
2020 struct i40e_netdev_priv *np = netdev_priv(netdev);
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00002021 struct i40e_vsi *vsi = np->vsi;
2022 struct i40e_pf *pf = vsi->back;
2023 struct i40e_vf *vf;
2024 int ret = 0;
2025
2026 /* validate the request */
2027 if (vf_id >= pf->num_alloc_vfs) {
2028 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2029 ret = -EINVAL;
2030 goto error_param;
2031 }
2032
2033 vf = &(pf->vf[vf_id]);
2034 /* first vsi is always the LAN vsi */
2035 vsi = pf->vsi[vf->lan_vsi_index];
2036 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2037 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2038 ret = -EINVAL;
2039 goto error_param;
2040 }
2041
2042 ivi->vf = vf_id;
2043
Mitch Williamsf4a1c5c2013-11-28 06:39:34 +00002044 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00002045
2046 ivi->tx_rate = 0;
2047 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2048 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2049 I40E_VLAN_PRIORITY_SHIFT;
2050 ret = 0;
2051
2052error_param:
2053 return ret;
2054}