blob: 1a7052a0284d9d25a79b72fd9640c0f1ee4c830c [file] [log] [blame]
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e.h"
29
30/***********************misc routines*****************************/
31
32/**
33 * i40e_vc_isvalid_vsi_id
34 * @vf: pointer to the vf info
35 * @vsi_id: vf relative vsi id
36 *
37 * check for the valid vsi id
38 **/
39static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
40{
41 struct i40e_pf *pf = vf->pf;
42
43 return pf->vsi[vsi_id]->vf_id == vf->vf_id;
44}
45
46/**
47 * i40e_vc_isvalid_queue_id
48 * @vf: pointer to the vf info
49 * @vsi_id: vsi id
50 * @qid: vsi relative queue id
51 *
52 * check for the valid queue id
53 **/
54static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
55 u8 qid)
56{
57 struct i40e_pf *pf = vf->pf;
58
59 return qid < pf->vsi[vsi_id]->num_queue_pairs;
60}
61
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	/* NOTE(review): the bound is inclusive (<=), i.e. vector_id may
	 * equal num_msix_vectors_vf.  Presumably this accounts for vector 0
	 * being the VF's misc/mailbox vector on top of the traffic vectors
	 * -- confirm against the device's MSI-X layout before tightening.
	 */
	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
75
76/***********************vf resource mgmt routines*****************/
77
78/**
79 * i40e_vc_get_pf_queue_id
80 * @vf: pointer to the vf info
81 * @vsi_idx: index of VSI in PF struct
82 * @vsi_queue_id: vsi relative queue id
83 *
84 * return pf relative queue id
85 **/
86static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
87 u8 vsi_queue_id)
88{
89 struct i40e_pf *pf = vf->pf;
90 struct i40e_vsi *vsi = pf->vsi[vsi_idx];
91 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
92
93 if (le16_to_cpu(vsi->info.mapping_flags) &
94 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
95 pf_queue_id =
96 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
97 else
98 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
99 vsi_queue_id;
100
101 return pf_queue_id;
102}
103
104/**
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000105 * i40e_config_irq_link_list
106 * @vf: pointer to the vf info
107 * @vsi_idx: index of VSI in PF struct
108 * @vecmap: irq map info
109 *
110 * configure irq link list from the map
111 **/
112static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
113 struct i40e_virtchnl_vector_map *vecmap)
114{
115 unsigned long linklistmap = 0, tempmap;
116 struct i40e_pf *pf = vf->pf;
117 struct i40e_hw *hw = &pf->hw;
118 u16 vsi_queue_id, pf_queue_id;
119 enum i40e_queue_type qtype;
120 u16 next_q, vector_id;
121 u32 reg, reg_idx;
122 u16 itr_idx = 0;
123
124 vector_id = vecmap->vector_id;
125 /* setup the head */
126 if (0 == vector_id)
127 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
128 else
129 reg_idx = I40E_VPINT_LNKLSTN(
Mitch Williams13c60b92013-09-28 07:13:18 +0000130 (pf->hw.func_caps.num_msix_vectors_vf
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000131 * vf->vf_id) + (vector_id - 1));
132
133 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
134 /* Special case - No queues mapped on this vector */
135 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
136 goto irq_list_done;
137 }
138 tempmap = vecmap->rxq_map;
Wei Yongjun48366502013-09-24 05:17:36 +0000139 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000140 linklistmap |= (1 <<
141 (I40E_VIRTCHNL_SUPPORTED_QTYPES *
142 vsi_queue_id));
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000143 }
144
145 tempmap = vecmap->txq_map;
Wei Yongjun48366502013-09-24 05:17:36 +0000146 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000147 linklistmap |= (1 <<
148 (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
149 + 1));
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000150 }
151
152 next_q = find_first_bit(&linklistmap,
153 (I40E_MAX_VSI_QP *
154 I40E_VIRTCHNL_SUPPORTED_QTYPES));
155 vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
156 qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
157 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
158 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
159
160 wr32(hw, reg_idx, reg);
161
162 while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
163 switch (qtype) {
164 case I40E_QUEUE_TYPE_RX:
165 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
166 itr_idx = vecmap->rxitr_idx;
167 break;
168 case I40E_QUEUE_TYPE_TX:
169 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
170 itr_idx = vecmap->txitr_idx;
171 break;
172 default:
173 break;
174 }
175
176 next_q = find_next_bit(&linklistmap,
177 (I40E_MAX_VSI_QP *
178 I40E_VIRTCHNL_SUPPORTED_QTYPES),
179 next_q + 1);
180 if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
181 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
182 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
183 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
184 vsi_queue_id);
185 } else {
186 pf_queue_id = I40E_QUEUE_END_OF_LIST;
187 qtype = 0;
188 }
189
190 /* format for the RQCTL & TQCTL regs is same */
191 reg = (vector_id) |
192 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
193 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
194 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
195 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
196 wr32(hw, reg_idx, reg);
197 }
198
199irq_list_done:
200 i40e_flush(hw);
201}
202
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 *
 * Programs the HMC tx queue context for the PF queue backing this VF
 * queue and associates the queue with the VF's PCI function.
 * Returns 0 on success or -ENOENT if an HMC context operation fails.
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128; /* base is in 128-byte units */
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC before writing the new one */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	/* VFVM index is the absolute VF number (relative id + base id) */
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
267
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 *
 * Validates the VF-supplied ring parameters and programs the HMC rx
 * queue context for the PF queue backing this VF queue.  Returns 0 on
 * success, -EINVAL for bad sizes, or -ENOENT if an HMC operation fails.
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128; /* base is in 128-byte units */
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation: max header buffer is 2KB - 64 */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation: max data buffer is 16KB - 128 */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation: must fit in [64, 16K) */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC before writing the new one */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
360
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 *
 * Creates a VSI for the VF under the PF's LAN VSI, installs the VF's
 * default unicast MAC filter, syncs filters to hardware and enables
 * broadcast reception.  Returns 0 or a negative errno.
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		/* remember both the PF-array index and the HW VSI id */
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "LAN VSI index %d, VSI id %d\n",
			 vsi->idx, vsi->id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    0, true, false);
	}

	/* NOTE(review): for any type other than I40E_VSI_SRIOV, f remains
	 * NULL and we fail here with -ENOMEM -- confirm this function is
	 * only ever called with I40E_VSI_SRIOV.
	 */
	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

	/* accept bcast pkts. by default */
	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
		ret = -EINVAL;
	}

error_alloc_vsi_res:
	return ret;
}
420
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 *
 * Programs the PF-queue-to-VF-queue translation tables and enables the
 * VF's queue mapping in hardware.
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues: one VPLAN_QTABLE entry per qp */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI: each VSILAN_QTABLE register packs two
	 * 16-bit queue ids (even qp in the low half, odd qp in the high
	 * half); unused slots get the 0x07FF sentinel in both halves.
	 * NOTE(review): only registers 0..6 are written here (14 qps) --
	 * confirm against the register file's table depth.
	 */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
470
471/**
472 * i40e_disable_vf_mappings
473 * @vf: pointer to the vf info
474 *
475 * disable vf mappings
476 **/
477static void i40e_disable_vf_mappings(struct i40e_vf *vf)
478{
479 struct i40e_pf *pf = vf->pf;
480 struct i40e_hw *hw = &pf->hw;
481 int i;
482
483 /* disable qp mappings */
484 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
485 for (i = 0; i < I40E_MAX_VSI_QP; i++)
486 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
487 I40E_QUEUE_END_OF_LIST);
488 i40e_flush(hw);
489}
490
/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 *
 * Releases the VF's VSI, disables all of the VF's interrupt vectors and
 * clears their linked lists, then resets the VF's bookkeeping state.
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	/* +1 covers the misc vector alongside the traffic vectors */
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers; vector 0 has its own
		 * DYN_CTL0 register, the rest live in the DYN_CTLN block
		 */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		/* mark the vector's queue list as empty */
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
544
545/**
546 * i40e_alloc_vf_res
547 * @vf: pointer to the vf info
548 *
549 * allocate vf resources
550 **/
551static int i40e_alloc_vf_res(struct i40e_vf *vf)
552{
553 struct i40e_pf *pf = vf->pf;
554 int total_queue_pairs = 0;
555 int ret;
556
557 /* allocate hw vsi context & associated resources */
558 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
559 if (ret)
560 goto error_alloc;
561 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
562 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
563
564 /* store the total qps number for the runtime
565 * vf req validation
566 */
567 vf->num_queue_pairs = total_queue_pairs;
568
569 /* vf is now completely initialized */
570 set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
571
572error_alloc:
573 if (ret)
574 i40e_free_vf_res(vf);
575
576 return ret;
577}
578
Mitch Williamsfc18eaa2013-11-28 06:39:27 +0000579#define VF_DEVICE_STATUS 0xAA
580#define VF_TRANS_PENDING_MASK 0x20
581/**
582 * i40e_quiesce_vf_pci
583 * @vf: pointer to the vf structure
584 *
585 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
586 * if the transactions never clear.
587 **/
588static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
589{
590 struct i40e_pf *pf = vf->pf;
591 struct i40e_hw *hw = &pf->hw;
592 int vf_abs_id, i;
593 u32 reg;
594
595 reg = rd32(hw, I40E_PF_VT_PFALLOC);
596 vf_abs_id = vf->vf_id + (reg & I40E_PF_VT_PFALLOC_FIRSTVF_MASK);
597
598 wr32(hw, I40E_PF_PCI_CIAA,
599 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
600 for (i = 0; i < 100; i++) {
601 reg = rd32(hw, I40E_PF_PCI_CIAD);
602 if ((reg & VF_TRANS_PENDING_MASK) == 0)
603 return 0;
604 udelay(1);
605 }
606 return -EIO;
607}
608
/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 *
 * Triggers (or, after a VFLR, observes) a VF reset, waits for hardware
 * to confirm it, stops the VF's rings, then frees and reallocates the
 * VF's resources before signalling the VF that the reset is complete.
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	/* make sure no PCI transactions are still in flight for this VF */
	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
	mdelay(10);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}
684
685/**
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000686 * i40e_vfs_are_assigned
687 * @pf: pointer to the pf structure
688 *
689 * Determine if any VFs are assigned to VMs
690 **/
691static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
692{
693 struct pci_dev *pdev = pf->pdev;
694 struct pci_dev *vfdev;
695
696 /* loop through all the VFs to see if we own any that are assigned */
697 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
698 while (vfdev) {
699 /* if we don't own it we don't care */
700 if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
701 /* if it is assigned we cannot release it */
702 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
703 return true;
704 }
705
706 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
707 I40E_VF_DEVICE_ID,
708 vfdev);
709 }
710
711 return false;
712}
713
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 *
 * Tears down every allocated VF and, if no VF is assigned to a guest,
 * disables SR-IOV on the device.  No-op when no VFs were allocated.
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);

	/* free up vf resources */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;

	/* SR-IOV can only be turned off while no VF is passed through */
	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}
757
758#ifdef CONFIG_PCI_IOV
759/**
760 * i40e_alloc_vfs
761 * @pf: pointer to the pf structure
762 * @num_alloc_vfs: number of vfs to allocate
763 *
764 * allocate vf resources
765 **/
766static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
767{
768 struct i40e_vf *vfs;
769 int i, ret = 0;
770
771 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
772 if (ret) {
773 dev_err(&pf->pdev->dev,
774 "pci_enable_sriov failed with error %d!\n", ret);
775 pf->num_alloc_vfs = 0;
776 goto err_iov;
777 }
778
779 /* allocate memory */
780 vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
781 if (!vfs) {
782 ret = -ENOMEM;
783 goto err_alloc;
784 }
785
786 /* apply default profile */
787 for (i = 0; i < num_alloc_vfs; i++) {
788 vfs[i].pf = pf;
789 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
790 vfs[i].vf_id = i;
791
792 /* assign default capabilities */
793 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
Mitch Williamsfc18eaa2013-11-28 06:39:27 +0000794 /* vf resources get allocated during reset */
795 i40e_reset_vf(&vfs[i], false);
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +0000796
797 /* enable vf vplan_qtable mappings */
798 i40e_enable_vf_mappings(&vfs[i]);
799 }
800 pf->vf = vfs;
801 pf->num_alloc_vfs = num_alloc_vfs;
802
803err_alloc:
804 if (ret)
805 i40e_free_vfs(pf);
806err_iov:
807 return ret;
808}
809
810#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 *
 * Returns the number of VFs enabled on success, a negative errno on
 * failure, or 0 when the kernel was built without CONFIG_PCI_IOV.
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	/* changing the VF count requires a full teardown first; asking
	 * for the count we already have is a no-op
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	/* cannot exceed the number of VFs requested at probe time */
	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	/* reached only when CONFIG_PCI_IOV is not set */
	return 0;
}
850
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 *
 * Returns the number of VFs enabled, or 0 after a teardown.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (!num_vfs) {
		/* zero means tear all VFs down */
		i40e_free_vfs(pf);
		return 0;
	}

	return i40e_pci_sriov_enable(pdev, num_vfs);
}
869
870/***********************virtual channel routines******************/
871
/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 *
 * Tracks valid/invalid message counts (disabling the VF once the
 * invalid-message threshold is exceeded), then sends the message over
 * the admin queue.  Returns 0 on success or -EIO on AQ failure.
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		/* a misbehaving VF is cut off once it crosses the limit */
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
917
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 *
 * Convenience wrapper: a response is a virtchnl message with no payload.
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
932
933/**
934 * i40e_vc_get_version_msg
935 * @vf: pointer to the vf info
936 *
937 * called from the vf to request the API version used by the PF
938 **/
939static int i40e_vc_get_version_msg(struct i40e_vf *vf)
940{
941 struct i40e_virtchnl_version_info info = {
942 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
943 };
944
945 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
946 I40E_SUCCESS, (u8 *)&info,
947 sizeof(struct
948 i40e_virtchnl_version_info));
949}
950
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 *
 * Builds an i40e_virtchnl_vf_resource reply describing the VF's VSI,
 * queue pairs, vectors and offload capabilities, marks the VF active,
 * and sends the reply.  On precondition failure an error status is sent
 * with an empty payload.
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	/* the VF must have completed init before asking for resources */
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* header plus one vsi_res entry per VSI (vsi_res is a trailing
	 * variable-length array in the resource struct)
	 */
	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;	/* send an empty payload with the error */
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	/* VLAN offload is only offered when no port VLAN is set */
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
1011
1012/**
1013 * i40e_vc_reset_vf_msg
1014 * @vf: pointer to the vf info
1015 * @msg: pointer to the msg buffer
1016 * @msglen: msg length
1017 *
1018 * called from the vf to reset itself,
1019 * unlike other virtchnl messages, pf driver
1020 * doesn't send the response back to the vf
1021 **/
Mitch Williamsfc18eaa2013-11-28 06:39:27 +00001022static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001023{
Mitch Williamsfc18eaa2013-11-28 06:39:27 +00001024 if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1025 i40e_reset_vf(vf, false);
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001026}
1027
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 *
 * Applies the requested unicast/multicast promiscuous settings via the
 * admin queue and sends the status back to the VF.
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	/* the VF must be active, privileged, and own a valid VSI.
	 * NOTE(review): the last clause rejects every VSI that is not of
	 * type I40E_VSI_FCOE, i.e. promiscuous mode is refused on regular
	 * SR-IOV VSIs -- confirm this restriction is intentional.
	 */
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
1074
1075/**
1076 * i40e_vc_config_queues_msg
1077 * @vf: pointer to the vf info
1078 * @msg: pointer to the msg buffer
1079 * @msglen: msg length
1080 *
1081 * called from the vf to configure the rx/tx
1082 * queues
1083 **/
1084static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1085{
1086 struct i40e_virtchnl_vsi_queue_config_info *qci =
1087 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1088 struct i40e_virtchnl_queue_pair_info *qpi;
1089 u16 vsi_id, vsi_queue_id;
1090 i40e_status aq_ret = 0;
1091 int i;
1092
1093 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1094 aq_ret = I40E_ERR_PARAM;
1095 goto error_param;
1096 }
1097
1098 vsi_id = qci->vsi_id;
1099 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1100 aq_ret = I40E_ERR_PARAM;
1101 goto error_param;
1102 }
1103 for (i = 0; i < qci->num_queue_pairs; i++) {
1104 qpi = &qci->qpair[i];
1105 vsi_queue_id = qpi->txq.queue_id;
1106 if ((qpi->txq.vsi_id != vsi_id) ||
1107 (qpi->rxq.vsi_id != vsi_id) ||
1108 (qpi->rxq.queue_id != vsi_queue_id) ||
1109 !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1110 aq_ret = I40E_ERR_PARAM;
1111 goto error_param;
1112 }
1113
1114 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1115 &qpi->rxq) ||
1116 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1117 &qpi->txq)) {
1118 aq_ret = I40E_ERR_PARAM;
1119 goto error_param;
1120 }
1121 }
1122
1123error_param:
1124 /* send the response to the vf */
1125 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1126 aq_ret);
1127}
1128
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Called from the VF to configure the interrupt-vector to queue
 * mapping.  Every vector map is fully validated (vector id, VSI id and
 * every queue bit in both the rx and tx queue bitmaps) before the
 * vector's linked list is programmed into hardware.
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
		(struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		/* copy into an unsigned long so for_each_set_bit() can
		 * iterate over the queue bitmap
		 */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		/* same check for the tx queue bitmap */
		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		/* map is fully validated; program the hardware link list */
		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
1191
1192/**
1193 * i40e_vc_enable_queues_msg
1194 * @vf: pointer to the vf info
1195 * @msg: pointer to the msg buffer
1196 * @msglen: msg length
1197 *
1198 * called from the vf to enable all or specific queue(s)
1199 **/
1200static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1201{
1202 struct i40e_virtchnl_queue_select *vqs =
1203 (struct i40e_virtchnl_queue_select *)msg;
1204 struct i40e_pf *pf = vf->pf;
1205 u16 vsi_id = vqs->vsi_id;
1206 i40e_status aq_ret = 0;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001207
1208 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1209 aq_ret = I40E_ERR_PARAM;
1210 goto error_param;
1211 }
1212
1213 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1214 aq_ret = I40E_ERR_PARAM;
1215 goto error_param;
1216 }
1217
1218 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1219 aq_ret = I40E_ERR_PARAM;
1220 goto error_param;
1221 }
Mitch Williams88f65632013-11-28 06:39:28 +00001222 if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
1223 aq_ret = I40E_ERR_TIMEOUT;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001224error_param:
1225 /* send the response to the vf */
1226 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1227 aq_ret);
1228}
1229
1230/**
1231 * i40e_vc_disable_queues_msg
1232 * @vf: pointer to the vf info
1233 * @msg: pointer to the msg buffer
1234 * @msglen: msg length
1235 *
1236 * called from the vf to disable all or specific
1237 * queue(s)
1238 **/
1239static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1240{
1241 struct i40e_virtchnl_queue_select *vqs =
1242 (struct i40e_virtchnl_queue_select *)msg;
1243 struct i40e_pf *pf = vf->pf;
1244 u16 vsi_id = vqs->vsi_id;
1245 i40e_status aq_ret = 0;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001246
1247 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1248 aq_ret = I40E_ERR_PARAM;
1249 goto error_param;
1250 }
1251
1252 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1253 aq_ret = I40E_ERR_PARAM;
1254 goto error_param;
1255 }
1256
1257 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1258 aq_ret = I40E_ERR_PARAM;
1259 goto error_param;
1260 }
Mitch Williams88f65632013-11-28 06:39:28 +00001261 if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
1262 aq_ret = I40E_ERR_TIMEOUT;
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001263
1264error_param:
1265 /* send the response to the vf */
1266 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1267 aq_ret);
1268}
1269
1270/**
1271 * i40e_vc_get_stats_msg
1272 * @vf: pointer to the vf info
1273 * @msg: pointer to the msg buffer
1274 * @msglen: msg length
1275 *
1276 * called from the vf to get vsi stats
1277 **/
1278static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1279{
1280 struct i40e_virtchnl_queue_select *vqs =
1281 (struct i40e_virtchnl_queue_select *)msg;
1282 struct i40e_pf *pf = vf->pf;
1283 struct i40e_eth_stats stats;
1284 i40e_status aq_ret = 0;
1285 struct i40e_vsi *vsi;
1286
1287 memset(&stats, 0, sizeof(struct i40e_eth_stats));
1288
1289 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1290 aq_ret = I40E_ERR_PARAM;
1291 goto error_param;
1292 }
1293
1294 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1295 aq_ret = I40E_ERR_PARAM;
1296 goto error_param;
1297 }
1298
1299 vsi = pf->vsi[vqs->vsi_id];
1300 if (!vsi) {
1301 aq_ret = I40E_ERR_PARAM;
1302 goto error_param;
1303 }
1304 i40e_update_eth_stats(vsi);
1305 memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
1306
1307error_param:
1308 /* send the response back to the vf */
1309 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1310 (u8 *)&stats, sizeof(stats));
1311}
1312
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Add guest MAC address filter(s).  All addresses are validated before
 * any filter is created, then the whole updated filter list is synced
 * to hardware in one pass.
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	/* VF must be active and privileged, and must own the target VSI */
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* reject broadcast/zero addresses before touching the filter list */
	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
				al->list[i].addr);
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		/* skip addresses that already have a filter */
		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			/* VSIs with VLANs need the MAC added per-VLAN;
			 * otherwise a single any-VLAN (-1) filter is enough
			 */
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       aq_ret);
}
1380
1381/**
1382 * i40e_vc_del_mac_addr_msg
1383 * @vf: pointer to the vf info
1384 * @msg: pointer to the msg buffer
1385 * @msglen: msg length
1386 *
1387 * remove guest mac address filter
1388 **/
1389static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1390{
1391 struct i40e_virtchnl_ether_addr_list *al =
1392 (struct i40e_virtchnl_ether_addr_list *)msg;
1393 struct i40e_pf *pf = vf->pf;
1394 struct i40e_vsi *vsi = NULL;
1395 u16 vsi_id = al->vsi_id;
1396 i40e_status aq_ret = 0;
1397 int i;
1398
1399 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1400 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1401 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1402 aq_ret = I40E_ERR_PARAM;
1403 goto error_param;
1404 }
1405 vsi = pf->vsi[vsi_id];
1406
1407 /* delete addresses from the list */
1408 for (i = 0; i < al->num_elements; i++)
1409 i40e_del_filter(vsi, al->list[i].addr,
1410 I40E_VLAN_ANY, true, false);
1411
1412 /* program the updated filter list */
1413 if (i40e_sync_vsi_filters(vsi))
1414 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1415
1416error_param:
1417 /* send the response to the vf */
1418 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1419 aq_ret);
1420}
1421
1422/**
1423 * i40e_vc_add_vlan_msg
1424 * @vf: pointer to the vf info
1425 * @msg: pointer to the msg buffer
1426 * @msglen: msg length
1427 *
1428 * program guest vlan id
1429 **/
1430static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1431{
1432 struct i40e_virtchnl_vlan_filter_list *vfl =
1433 (struct i40e_virtchnl_vlan_filter_list *)msg;
1434 struct i40e_pf *pf = vf->pf;
1435 struct i40e_vsi *vsi = NULL;
1436 u16 vsi_id = vfl->vsi_id;
1437 i40e_status aq_ret = 0;
1438 int i;
1439
1440 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1441 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1442 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1443 aq_ret = I40E_ERR_PARAM;
1444 goto error_param;
1445 }
1446
1447 for (i = 0; i < vfl->num_elements; i++) {
1448 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1449 aq_ret = I40E_ERR_PARAM;
1450 dev_err(&pf->pdev->dev,
1451 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
1452 goto error_param;
1453 }
1454 }
1455 vsi = pf->vsi[vsi_id];
1456 if (vsi->info.pvid) {
1457 aq_ret = I40E_ERR_PARAM;
1458 goto error_param;
1459 }
1460
1461 i40e_vlan_stripping_enable(vsi);
1462 for (i = 0; i < vfl->num_elements; i++) {
1463 /* add new VLAN filter */
1464 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
1465 if (ret)
1466 dev_err(&pf->pdev->dev,
1467 "Unable to add VF vlan filter %d, error %d\n",
1468 vfl->vlan_id[i], ret);
1469 }
1470
1471error_param:
1472 /* send the response to the vf */
1473 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
1474}
1475
1476/**
1477 * i40e_vc_remove_vlan_msg
1478 * @vf: pointer to the vf info
1479 * @msg: pointer to the msg buffer
1480 * @msglen: msg length
1481 *
1482 * remove programmed guest vlan id
1483 **/
1484static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1485{
1486 struct i40e_virtchnl_vlan_filter_list *vfl =
1487 (struct i40e_virtchnl_vlan_filter_list *)msg;
1488 struct i40e_pf *pf = vf->pf;
1489 struct i40e_vsi *vsi = NULL;
1490 u16 vsi_id = vfl->vsi_id;
1491 i40e_status aq_ret = 0;
1492 int i;
1493
1494 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1495 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1496 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1497 aq_ret = I40E_ERR_PARAM;
1498 goto error_param;
1499 }
1500
1501 for (i = 0; i < vfl->num_elements; i++) {
1502 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1503 aq_ret = I40E_ERR_PARAM;
1504 goto error_param;
1505 }
1506 }
1507
1508 vsi = pf->vsi[vsi_id];
1509 if (vsi->info.pvid) {
1510 aq_ret = I40E_ERR_PARAM;
1511 goto error_param;
1512 }
1513
1514 for (i = 0; i < vfl->num_elements; i++) {
1515 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1516 if (ret)
1517 dev_err(&pf->pdev->dev,
1518 "Unable to delete VF vlan filter %d, error %d\n",
1519 vfl->vlan_id[i], ret);
1520 }
1521
1522error_param:
1523 /* send the response to the vf */
1524 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1525}
1526
1527/**
Jesse Brandeburg5c3c48a2013-09-11 08:40:07 +00001528 * i40e_vc_validate_vf_msg
1529 * @vf: pointer to the vf info
1530 * @msg: pointer to the msg buffer
1531 * @msglen: msg length
1532 * @msghndl: msg handle
1533 *
1534 * validate msg
1535 **/
1536static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1537 u32 v_retval, u8 *msg, u16 msglen)
1538{
1539 bool err_msg_format = false;
1540 int valid_len;
1541
1542 /* Check if VF is disabled. */
1543 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1544 return I40E_ERR_PARAM;
1545
1546 /* Validate message length. */
1547 switch (v_opcode) {
1548 case I40E_VIRTCHNL_OP_VERSION:
1549 valid_len = sizeof(struct i40e_virtchnl_version_info);
1550 break;
1551 case I40E_VIRTCHNL_OP_RESET_VF:
1552 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1553 valid_len = 0;
1554 break;
1555 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1556 valid_len = sizeof(struct i40e_virtchnl_txq_info);
1557 break;
1558 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1559 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1560 break;
1561 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1562 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
1563 if (msglen >= valid_len) {
1564 struct i40e_virtchnl_vsi_queue_config_info *vqc =
1565 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1566 valid_len += (vqc->num_queue_pairs *
1567 sizeof(struct
1568 i40e_virtchnl_queue_pair_info));
1569 if (vqc->num_queue_pairs == 0)
1570 err_msg_format = true;
1571 }
1572 break;
1573 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1574 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1575 if (msglen >= valid_len) {
1576 struct i40e_virtchnl_irq_map_info *vimi =
1577 (struct i40e_virtchnl_irq_map_info *)msg;
1578 valid_len += (vimi->num_vectors *
1579 sizeof(struct i40e_virtchnl_vector_map));
1580 if (vimi->num_vectors == 0)
1581 err_msg_format = true;
1582 }
1583 break;
1584 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1585 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1586 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1587 break;
1588 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1589 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1590 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1591 if (msglen >= valid_len) {
1592 struct i40e_virtchnl_ether_addr_list *veal =
1593 (struct i40e_virtchnl_ether_addr_list *)msg;
1594 valid_len += veal->num_elements *
1595 sizeof(struct i40e_virtchnl_ether_addr);
1596 if (veal->num_elements == 0)
1597 err_msg_format = true;
1598 }
1599 break;
1600 case I40E_VIRTCHNL_OP_ADD_VLAN:
1601 case I40E_VIRTCHNL_OP_DEL_VLAN:
1602 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1603 if (msglen >= valid_len) {
1604 struct i40e_virtchnl_vlan_filter_list *vfl =
1605 (struct i40e_virtchnl_vlan_filter_list *)msg;
1606 valid_len += vfl->num_elements * sizeof(u16);
1607 if (vfl->num_elements == 0)
1608 err_msg_format = true;
1609 }
1610 break;
1611 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1612 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1613 break;
1614 case I40E_VIRTCHNL_OP_GET_STATS:
1615 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1616 break;
1617 /* These are always errors coming from the VF. */
1618 case I40E_VIRTCHNL_OP_EVENT:
1619 case I40E_VIRTCHNL_OP_UNKNOWN:
1620 default:
1621 return -EPERM;
1622 break;
1623 }
1624 /* few more checks */
1625 if ((valid_len != msglen) || (err_msg_format)) {
1626 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1627 return -EINVAL;
1628 } else {
1629 return 0;
1630 }
1631}
1632
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @msghndl: msg handle
 *
 * Called from the common aeq/arq handler to process a request from a
 * VF: the message is validated, the VF's reset-status register is
 * marked active, and the request is dispatched to the matching
 * opcode handler.  Unknown opcodes get an I40E_ERR_NOT_IMPLEMENTED
 * response.
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_vf *vf = &(pf->vf[vf_id]);
	struct i40e_hw *hw = &pf->hw;
	int ret;

	pf->vf_aq_requests++;
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		/* validate already responded to the VF where appropriate */
		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
		return ret;
	}
	/* mark the VF as active in its reset status register */
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		/* reset sends no response back to the VF */
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
1712
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * Called from the VFLR irq handler to free up VF resources and state
 * variables: scans GLGEN_VFLRSTAT for VFs that went through a
 * function-level reset, acknowledges each one, resets it, and then
 * re-enables the VFLR interrupt cause.
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	/* NOTE(review): the pending flag is cleared before the scan,
	 * presumably so a VFLR arriving during the loop re-arms the
	 * handler rather than being lost — confirm against the irq path
	 */
	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		/* VFLR status bits are indexed by the absolute VF number */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			i40e_reset_vf(vf, true);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}
1752
1753/**
1754 * i40e_vc_vf_broadcast
1755 * @pf: pointer to the pf structure
1756 * @opcode: operation code
1757 * @retval: return value
1758 * @msg: pointer to the msg buffer
1759 * @msglen: msg length
1760 *
1761 * send a message to all VFs on a given PF
1762 **/
1763static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1764 enum i40e_virtchnl_ops v_opcode,
1765 i40e_status v_retval, u8 *msg,
1766 u16 msglen)
1767{
1768 struct i40e_hw *hw = &pf->hw;
1769 struct i40e_vf *vf = pf->vf;
1770 int i;
1771
1772 for (i = 0; i < pf->num_alloc_vfs; i++) {
1773 /* Ignore return value on purpose - a given VF may fail, but
1774 * we need to keep going and send to all of them
1775 */
1776 i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
1777 msg, msglen, NULL);
1778 vf++;
1779 }
1780}
1781
1782/**
1783 * i40e_vc_notify_link_state
1784 * @pf: pointer to the pf structure
1785 *
1786 * send a link status message to all VFs on a given PF
1787 **/
1788void i40e_vc_notify_link_state(struct i40e_pf *pf)
1789{
1790 struct i40e_virtchnl_pf_event pfe;
1791
1792 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1793 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1794 pfe.event_data.link_event.link_status =
1795 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
1796 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
1797
1798 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
1799 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
1800}
1801
1802/**
1803 * i40e_vc_notify_reset
1804 * @pf: pointer to the pf structure
1805 *
1806 * indicate a pending reset to all VFs on a given PF
1807 **/
1808void i40e_vc_notify_reset(struct i40e_pf *pf)
1809{
1810 struct i40e_virtchnl_pf_event pfe;
1811
1812 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
1813 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
1814 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
1815 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
1816}
1817
1818/**
1819 * i40e_vc_notify_vf_reset
1820 * @vf: pointer to the vf structure
1821 *
1822 * indicate a pending reset to the given VF
1823 **/
1824void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
1825{
1826 struct i40e_virtchnl_pf_event pfe;
1827
1828 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
1829 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
1830 i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
1831 I40E_SUCCESS, (u8 *)&pfe,
1832 sizeof(struct i40e_virtchnl_pf_event), NULL);
1833}
1834
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * Program a VF MAC address on behalf of the host administrator:
 * validates the request (vf_id range, VF initialized, valid unicast
 * MAC), replaces the VF's current default MAC filter with one for
 * @mac, and programs the change into hardware.  Returns 0 on success
 * or a negative errno.
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* switch to the VF's LAN VSI, not the netdev's own VSI */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	/* remember the new default address for this VF */
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}
1902
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: VLAN id (0..I40E_MAX_VLANID; 0 together with qos 0 clears
 *           the port VLAN)
 * @qos: priority setting (0..7)
 *
 * Program a port VLAN id and/or QoS priority on a VF's LAN VSI: any
 * existing port VLAN is removed first, then the new pvid is installed
 * (or VLAN stripping disabled when clearing), and finally the VLAN
 * filter itself is added.  Returns 0 on success or a negative errno.
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			/* log and continue; the new pvid is still applied */
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	/* pvid encodes both the VLAN id and the QoS priority bits */
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vlan_stripping_disable(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
	}

	/* catches a failure from i40e_vsi_add_pvid() above */
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	ret = 0;

error_pvid:
	return ret;
}
1981
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * Configure the VF tx rate.  Not implemented: always reports
 * "operation not supported" to the ndo caller.
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}
1994
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * Fill @ivi with the VF's current configuration (MAC address, port
 * VLAN id and QoS) for the rtnetlink VF query path.  Returns 0 on
 * success or a negative errno.
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	/* first entry of the list is the default ethernet address */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
		break;
	}

	/* tx rate limiting is not implemented (see i40e_ndo_set_vf_bw) */
	ivi->tx_rate = 0;
	/* decompose the pvid into its VLAN id and priority fields */
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}