/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2010-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "regs.h"
#include "vfdi.h"

/* Number of longs required to track all the VIs in a VF */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)

/* Maximum number of RX queues supported */
#define VF_MAX_RX_QUEUES 63

/**
 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
 * @VF_TX_FILTER_OFF: Disabled
 * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
 *	2 TX queues allowed per VF.
 * @VF_TX_FILTER_ON: Enabled
 */
enum efx_vf_tx_filter_mode {
	VF_TX_FILTER_OFF,
	VF_TX_FILTER_AUTO,
	VF_TX_FILTER_ON,
};

/**
 * struct efx_vf - Back-end resource and protocol state for a PCI VF
 * @efx: The Efx NIC owning this VF
 * @pci_rid: The PCI requester ID for this VF
 * @pci_name: The PCI name (formatted address) of this VF
 * @index: Index of VF within its port and PF.
 * @req: VFDI incoming request work item. Incoming USR_EV events are received
 *	by the NAPI handler, but must be handled by executing MCDI requests
 *	inside a work item.
 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
 *	@status_lock
 * @busy: VFDI request queued to be processed or being processed. Receiving
 *	a VFDI request when @busy is set is an error condition.
 * @buf: Incoming VFDI requests are DMAed from the VF into this buffer.
 * @buftbl_base: Buffer table entries for this VF start at this index.
 * @rx_filtering: Receive filtering has been requested by the VF driver.
 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
 * @rx_filter_qid: VF relative qid for RX filter requested by VF.
 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
 * @tx_filter_mode: Transmit MAC filtering mode.
 * @tx_filter_id: Transmit MAC filter ID.
 * @addr: The MAC address and outer vlan tag of the VF.
 * @status_addr: VF DMA address of page for &struct vfdi_status updates.
 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
 *	@peer_page_addrs and @peer_page_count from simultaneous
 *	updates by the VM and consumption by
 *	efx_sriov_update_vf_addr()
 * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 * @peer_page_count: Number of entries in @peer_page_addrs.
 * @evq0_addrs: Array of guest pages backing evq0.
 * @evq0_count: Number of entries in @evq0_addrs.
 * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
 *	to wait for flush completions.
 * @txq_lock: Mutex for TX queue allocation.
 * @txq_mask: Mask of initialized transmit queues.
 * @txq_count: Number of initialized transmit queues.
 * @rxq_mask: Mask of initialized receive queues.
 * @rxq_count: Number of initialized receive queues.
 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
 *	due to flush failure.
 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
 * @reset_work: Work item to schedule a VF reset.
 */
struct efx_vf {
	struct efx_nic *efx;
	unsigned int pci_rid;
	char pci_name[13]; /* dddd:bb:dd.f */
	unsigned int index;
	struct work_struct req;
	u64 req_addr;
	int req_type;
	unsigned req_seqno;
	unsigned msg_seqno;
	bool busy;
	struct efx_buffer buf;
	unsigned buftbl_base;
	bool rx_filtering;
	enum efx_filter_flags rx_filter_flags;
	unsigned rx_filter_qid;
	int rx_filter_id;
	enum efx_vf_tx_filter_mode tx_filter_mode;
	int tx_filter_id;
	struct vfdi_endpoint addr;
	u64 status_addr;
	struct mutex status_lock;
	u64 *peer_page_addrs;
	unsigned peer_page_count;
	u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
		       EFX_BUF_SIZE];
	unsigned evq0_count;
	wait_queue_head_t flush_waitq;
	struct mutex txq_lock;
	unsigned long txq_mask[VI_MASK_LENGTH];
	unsigned txq_count;
	unsigned long rxq_mask[VI_MASK_LENGTH];
	unsigned rxq_count;
	unsigned long rxq_retry_mask[VI_MASK_LENGTH];
	atomic_t rxq_retry_count;
	struct work_struct reset_work;
};

struct efx_memcpy_req {
	unsigned int from_rid;
	void *from_buf;
	u64 from_addr;
	unsigned int to_rid;
	u64 to_addr;
	unsigned length;
};

/**
 * struct efx_local_addr - A MAC address on the vswitch without a VF.
 *
 * Siena does not have a switch, so VFs can't transmit data to each
 * other. Instead the VFs must be made aware of the local addresses
 * on the vswitch, so that they can arrange for an alternative
 * software datapath to be used.
 *
 * @link: List head for insertion into efx->local_addr_list.
 * @addr: Ethernet address
 */
struct efx_local_addr {
	struct list_head link;
	u8 addr[ETH_ALEN];
};

/**
 * struct efx_endpoint_page - Page of vfdi_endpoint structures
 *
 * @link: List head for insertion into efx->local_page_list.
 * @ptr: Pointer to page.
 * @addr: DMA address of page.
 */
struct efx_endpoint_page {
	struct list_head link;
	void *ptr;
	dma_addr_t addr;
};

/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid)					\
	((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))

#define EFX_FIELD_MASK(_field)			\
	((1 << _field ## _WIDTH) - 1)

/* VFs can only use this many transmit channels */
static unsigned int vf_max_tx_channels = 2;
module_param(vf_max_tx_channels, uint, 0444);
MODULE_PARM_DESC(vf_max_tx_channels,
		 "Limit the number of TX channels VFs can use");

static int max_vfs = -1;
module_param(max_vfs, int, 0444);
MODULE_PARM_DESC(max_vfs,
		 "Reduce the number of VFs initialized by the driver");

/* Workqueue used by VFDI communication. We can't use the global
 * workqueue because it may be running the VF driver's probe()
 * routine, which will be blocked there waiting for a VFDI response.
 */
static struct workqueue_struct *vfdi_workqueue;

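/* Convert a VF-relative VI index to the absolute per-port VI index:
 * the VFs' VIs start at EFX_VI_BASE and each VF owns efx_vf_size(efx)
 * consecutive VIs.
 */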
static unsigned abs_index(struct efx_vf *vf, unsigned index)
{
	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}

static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
			 unsigned *vi_scale_out, unsigned *vf_total_out)
{
	u8 inbuf[MC_CMD_SRIOV_IN_LEN];
	u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
	unsigned vi_scale, vf_total;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);

	rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
			  outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SRIOV_OUT_LEN)
		return -EIO;

	vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
	vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
	if (vi_scale > EFX_VI_SCALE_MAX)
		return -EOPNOTSUPP;

	if (vi_scale_out)
		*vi_scale_out = vi_scale;
	if (vf_total_out)
		*vf_total_out = vf_total;

	return 0;
}

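/* Enable or disable delivery of USR_EV (VFDI) events, directing them to
 * the PF's VFDI channel.
 */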
static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}

static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
			    unsigned int count)
{
	u8 *inbuf, *record;
	unsigned int used;
	u32 from_rid, from_hi, from_lo;
	int rc;

	mb();	/* Finish writing source/reading dest before DMA starts */

	used = MC_CMD_MEMCPY_IN_LEN(count);
	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
		return -ENOBUFS;

	/* Allocate room for the largest request */
	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
	if (inbuf == NULL)
		return -ENOMEM;

	record = inbuf;
	MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
	while (count-- > 0) {
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
			       (u32)req->to_addr);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
			       (u32)(req->to_addr >> 32));
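		/* A NULL from_buf means the source is the DMA address in
		 * from_addr; otherwise the source bytes are embedded
		 * inline in this MCDI request.
		 */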
		if (req->from_buf == NULL) {
			from_rid = req->from_rid;
			from_lo = (u32)req->from_addr;
			from_hi = (u32)(req->from_addr >> 32);
		} else {
			if (WARN_ON(used + req->length >
				    MCDI_CTL_SDU_LEN_MAX)) {
				rc = -ENOBUFS;
				goto out;
			}

			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
			from_lo = used;
			from_hi = 0;
			memcpy(inbuf + used, req->from_buf, req->length);
			used += req->length;
		}

		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID,
			       from_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
			       from_lo);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
			       from_hi);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);

		++req;
		record += MC_CMD_MEMCPY_IN_RECORD_LEN;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
	kfree(inbuf);

	mb();	/* Don't write source/read dest before DMA is complete */

	return rc;
}

/* The TX filter is entirely controlled by this driver, and is modified
 * underneath the feet of the VF
 */
static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->tx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->tx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
			  vf->pci_name, vf->tx_filter_id);
		vf->tx_filter_id = -1;
	}

	if (is_zero_ether_addr(vf->addr.mac_addr))
		return;

	/* Turn on TX filtering automatically if not explicitly
	 * enabled or disabled.
	 */
	if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
		vf->tx_filter_mode = VF_TX_FILTER_ON;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_tx(&filter, abs_index(vf, 0));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to migrate tx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
			  vf->pci_name, rc);
		vf->tx_filter_id = rc;
	}
}

/* The RX filter is managed here on behalf of the VF driver */
static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->rx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->rx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
			  vf->pci_name, vf->rx_filter_id);
		vf->rx_filter_id = -1;
	}

	if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
		return;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
			   vf->rx_filter_flags,
			   abs_index(vf, vf->rx_filter_qid));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to insert rx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
			  vf->pci_name, rc);
		vf->rx_filter_id = rc;
	}
}

static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
{
	efx_sriov_reset_tx_filter(vf);
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);
}

/* Push the peer list to this VF. The caller must hold status_lock to interlock
 * with VFDI requests, and they must be serialised against manipulation of
 * local_page_list, either by acquiring local_lock or by running from
 * efx_sriov_peer_work()
 */
static void __efx_sriov_push_vf_status(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_status *status = efx->vfdi_status.addr;
	struct efx_memcpy_req copy[4];
	struct efx_endpoint_page *epp;
	unsigned int pos, count;
	unsigned data_offset;
	efx_qword_t event;

	WARN_ON(!mutex_is_locked(&vf->status_lock));
	WARN_ON(!vf->status_addr);

	status->local = vf->addr;
	status->generation_end = ++status->generation_start;

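	/* The VF driver treats the status page as consistent only when it
	 * reads generation_start == generation_end, so generation_start
	 * must be DMAed to the guest first and generation_end last.
	 */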
	memset(copy, '\0', sizeof(copy));
	/* Write generation_start */
	copy[0].from_buf = &status->generation_start;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						     generation_start);
	copy[0].length = sizeof(status->generation_start);
	/* DMA the rest of the structure (excluding the generations). This
	 * assumes that the non-generation portion of vfdi_status is in
	 * one chunk starting at the version member.
	 */
	data_offset = offsetof(struct vfdi_status, version);
	copy[1].from_rid = efx->pci_dev->devfn;
	copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->status_addr + data_offset;
	copy[1].length = status->length - data_offset;

	/* Copy the peer pages */
	pos = 2;
	count = 0;
	list_for_each_entry(epp, &efx->local_page_list, link) {
		if (count == vf->peer_page_count) {
			/* The VF driver will know it needs to provide more
			 * pages because peer_count is too large.
			 */
			break;
		}
		copy[pos].from_buf = NULL;
		copy[pos].from_rid = efx->pci_dev->devfn;
		copy[pos].from_addr = epp->addr;
		copy[pos].to_rid = vf->pci_rid;
		copy[pos].to_addr = vf->peer_page_addrs[count];
		copy[pos].length = EFX_PAGE_SIZE;

		if (++pos == ARRAY_SIZE(copy)) {
			efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
			pos = 0;
		}
		++count;
	}

	/* Write generation_end */
	copy[pos].from_buf = &status->generation_end;
	copy[pos].to_rid = vf->pci_rid;
	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						       generation_end);
	copy[pos].length = sizeof(status->generation_end);
	efx_sriov_memcpy(efx, copy, pos + 1);

	/* Notify the guest */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
			   &event);
}

static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
			   u64 *addr, unsigned count)
{
	efx_qword_t buf;
	unsigned pos;

	for (pos = 0; pos < count; ++pos) {
		EFX_POPULATE_QWORD_3(buf,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF,
				     addr ? addr[pos] >> 12 : 0,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
				&buf, offset + pos);
	}
}

static bool bad_vf_index(struct efx_nic *efx, unsigned index)
{
	return index >= efx_vf_size(efx);
}

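/* A queue must be backed by a power-of-two number of buffer-table pages
 * within the hardware limit; buf_count & (buf_count - 1) is non-zero for
 * any value with more than one bit set.
 */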
static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
	unsigned max_buf_count = max_entry_count *
		sizeof(efx_qword_t) / EFX_BUF_SIZE;

	return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
}

/* Check that VI specified by per-port index belongs to a VF.
 * Optionally set VF index and VI index within the VF.
 */
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
			 struct efx_vf **vf_out, unsigned *rel_index_out)
{
	unsigned vf_i;

	if (abs_index < EFX_VI_BASE)
		return true;
	vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
	if (vf_i >= efx->vf_init_count)
		return true;

	if (vf_out)
		*vf_out = efx->vf + vf_i;
	if (rel_index_out)
		*rel_index_out = abs_index % efx_vf_size(efx);
	return false;
}

static int efx_vfdi_init_evq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_evq = req->u.init_evq.index;
	unsigned buf_count = req->u.init_evq.buf_count;
	unsigned abs_evq = abs_index(vf, vf_evq);
	unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) ||
	    bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
				  vf->pci_name, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
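	/* buf_count is a power of two (checked above), so __ffs(buf_count)
	 * is log2(buf_count), which is what the EVQ_SIZE field encodes.
	 */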
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(buf_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);

	if (vf_evq == 0) {
		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
		       buf_count * sizeof(u64));
		vf->evq0_count = buf_count;
	}

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_init_rxq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.init_rxq.index;
	unsigned vf_evq = req->u.init_rxq.evq;
	unsigned buf_count = req->u.init_rxq.buf_count;
	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
	unsigned label;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
	    vf_rxq >= VF_MAX_RX_QUEUES ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_rxq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}
	if (!__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
		++vf->rxq_count;
	efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);

	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_RX_DESCQ_LABEL, label,
			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_AZ_RX_DESCQ_JUMBO,
			     !!(req->u.init_rxq.flags &
				VFDI_RXQ_FLAG_SCATTER_EN),
			     FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
			 abs_index(vf, vf_rxq));

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_init_txq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_txq = req->u.init_txq.index;
	unsigned vf_evq = req->u.init_txq.evq;
	unsigned buf_count = req->u.init_txq.buf_count;
	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
	unsigned label, eth_filt_en;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
	    vf_txq >= vf_max_tx_channels ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_txq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&vf->txq_lock);
	if (!__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
		++vf->txq_count;
	mutex_unlock(&vf->txq_lock);
	efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);

	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;

	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_8(reg,
			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
			     FRF_AZ_TX_DESCQ_EN, 1,
			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_TX_DESCQ_LABEL, label,
			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
			 abs_index(vf, vf_txq));

	return VFDI_RC_SUCCESS;
}

/* Returns true when efx_vfdi_fini_all_queues should wake */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
{
	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
	smp_mb();

	return (!vf->txq_count && !vf->rxq_count) ||
	       atomic_read(&vf->rxq_retry_count);
}

static void efx_vfdi_flush_clear(struct efx_vf *vf)
{
	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
	vf->txq_count = 0;
	memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
	vf->rxq_count = 0;
	memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
	atomic_set(&vf->rxq_retry_count, 0);
}

static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	efx_oword_t reg;
	unsigned count = efx_vf_size(efx);
	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
	unsigned timeout = HZ;
	unsigned index, rxqs_count;
	__le32 *rxqs;
	int rc;

	BUILD_BUG_ON(VF_MAX_RX_QUEUES >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
	if (rxqs == NULL)
		return VFDI_RC_ENOMEM;

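	/* Temporarily disable flow control (fc_disable nests), since RX
	 * flushes can fail to complete while the MAC is flow-controlled.
	 */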
	rtnl_lock();
	if (efx->fc_disable++ == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Flush all the initialized queues */
	rxqs_count = 0;
	for (index = 0; index < count; ++index) {
		if (test_bit(index, vf->txq_mask)) {
			EFX_POPULATE_OWORD_2(reg,
					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
					     FRF_AZ_TX_FLUSH_DESCQ,
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
		if (test_bit(index, vf->rxq_mask))
			rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
	}

	atomic_set(&vf->rxq_retry_count, 0);
	while (timeout && (vf->rxq_count || vf->txq_count)) {
		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
				  rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
		WARN_ON(rc < 0);

		timeout = wait_event_timeout(vf->flush_waitq,
					     efx_vfdi_flush_wake(vf),
					     timeout);
		rxqs_count = 0;
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
				rxqs[rxqs_count++] =
					cpu_to_le32(vf_offset + index);
			}
		}
	}

	rtnl_lock();
	if (--efx->fc_disable == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Irrespective of success/failure, fini the queues */
	EFX_ZERO_OWORD(reg);
	for (index = 0; index < count; ++index) {
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
				 vf_offset + index);
	}
	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
	kfree(rxqs);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;

	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}

static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.mac_filter.rxq;
	unsigned flags;

	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
				  "flags 0x%x\n", vf->pci_name, vf_rxq,
				  req->u.mac_filter.flags);
		return VFDI_RC_EINVAL;
	}

	flags = 0;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	vf->rx_filter_flags = flags;
	vf->rx_filter_qid = vf_rxq;
	vf->rx_filtering = true;

	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &efx->peer_work);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
	vf->rx_filtering = false;
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	u64 page_count = req->u.set_status_page.peer_page_count;
	u64 max_page_count =
		(EFX_PAGE_SIZE -
		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
		/ sizeof(req->u.set_status_page.peer_page_addr[0]);

	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
				  vf->pci_name);
		return VFDI_RC_EINVAL;
	}

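	/* Lock ordering: local_lock, then status_lock, matching
	 * efx_sriov_peer_work().
	 */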
	mutex_lock(&efx->local_lock);
	mutex_lock(&vf->status_lock);
	vf->status_addr = req->u.set_status_page.dma_addr;

	kfree(vf->peer_page_addrs);
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (page_count) {
		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
					      GFP_KERNEL);
		if (vf->peer_page_addrs) {
			memcpy(vf->peer_page_addrs,
			       req->u.set_status_page.peer_page_addr,
			       page_count * sizeof(u64));
			vf->peer_page_count = page_count;
		}
	}

	__efx_sriov_push_vf_status(vf);
	mutex_unlock(&vf->status_lock);
	mutex_unlock(&efx->local_lock);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_clear_status_page(struct efx_vf *vf)
{
	mutex_lock(&vf->status_lock);
	vf->status_addr = 0;
	mutex_unlock(&vf->status_lock);

	return VFDI_RC_SUCCESS;
}

typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);

static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};

static void efx_sriov_vfdi(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, req);
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	struct efx_memcpy_req copy[2];
	int rc;

	/* Copy this page into the local address space */
	memset(copy, '\0', sizeof(copy));
	copy[0].from_rid = vf->pci_rid;
	copy[0].from_addr = vf->req_addr;
	copy[0].to_rid = efx->pci_dev->devfn;
	copy[0].to_addr = vf->buf.dma_addr;
	copy[0].length = EFX_PAGE_SIZE;
	rc = efx_sriov_memcpy(efx, copy, 1);
	if (rc) {
		/* If we can't get the request, we can't reply to the caller */
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
				  vf->pci_name, -rc);
		vf->busy = false;
		return;
	}

	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
		rc = vfdi_ops[req->op](vf);
		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "vfdi request %d from %s ok\n",
				  req->op, vf->pci_name);
		}
	} else {
		netif_dbg(efx, hw, efx->net_dev,
			  "ERROR: Unrecognised request %d from VF %s addr "
			  "%llx\n", req->op, vf->pci_name,
			  (unsigned long long)vf->req_addr);
		rc = VFDI_RC_EOPNOTSUPP;
	}

	/* Allow subsequent VF requests */
	vf->busy = false;
	smp_wmb();

	/* Respond to the request */
	req->rc = rc;
	req->op = VFDI_OP_RESPONSE;

	memset(copy, '\0', sizeof(copy));
	copy[0].from_buf = &req->rc;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
	copy[0].length = sizeof(req->rc);
	copy[1].from_buf = &req->op;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
	copy[1].length = sizeof(req->op);

	(void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}

/* After a reset the event queues inside the guests no longer exist. Fill the
 * event rings in guest memory with VFDI reset events, then (re-)initialise
 * the event queue to raise an interrupt. The guest driver will then recover.
 */
static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
{
	struct efx_nic *efx = vf->efx;
	struct efx_memcpy_req copy_req[4];
	efx_qword_t event;
	unsigned int pos, count, k, buftbl, abs_evq;
	efx_oword_t reg;
	efx_dword_t ptr;
	int rc;

	BUG_ON(buffer->len != EFX_PAGE_SIZE);

	if (!vf->evq0_count)
		return;
	BUG_ON(vf->evq0_count & (vf->evq0_count - 1));

	mutex_lock(&vf->status_lock);
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, vf->msg_seqno,
			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
	vf->msg_seqno++;
	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
		memcpy(buffer->addr + pos, &event, sizeof(event));

	for (pos = 0; pos < vf->evq0_count; pos += count) {
		count = min_t(unsigned, vf->evq0_count - pos,
			      ARRAY_SIZE(copy_req));
		for (k = 0; k < count; k++) {
			copy_req[k].from_buf = NULL;
			copy_req[k].from_rid = efx->pci_dev->devfn;
			copy_req[k].from_addr = buffer->dma_addr;
			copy_req[k].to_rid = vf->pci_rid;
			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
			copy_req[k].length = EFX_PAGE_SIZE;
		}
		rc = efx_sriov_memcpy(efx, copy_req, count);
		if (rc) {
			if (net_ratelimit())
				netif_err(efx, hw, efx->net_dev,
					  "ERROR: Unable to notify %s of reset"
					  ": %d\n", vf->pci_name, -rc);
			break;
		}
	}

	/* Reinitialise, arm and trigger evq0 */
	abs_evq = abs_index(vf, 0);
	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
	efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
	efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);

	mutex_unlock(&vf->status_lock);
}

static void efx_sriov_reset_vf_work(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;

	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
		efx_sriov_reset_vf(vf, &buf);
		efx_nic_free_buffer(efx, &buf);
	}
}

static void efx_sriov_handle_no_channel(struct efx_nic *efx)
{
	netif_err(efx, drv, efx->net_dev,
		  "ERROR: IOV requires MSI-X and 1 additional interrupt "
		  "vector. IOV disabled\n");
	efx->vf_count = 0;
}

static int efx_sriov_probe_channel(struct efx_channel *channel)
{
	channel->efx->vfdi_channel = channel;
	return 0;
}

static void
efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	snprintf(buf, len, "%s-iov", channel->efx->name);
}

static const struct efx_channel_type efx_sriov_channel_type = {
	.handle_no_channel = efx_sriov_handle_no_channel,
	.pre_probe = efx_sriov_probe_channel,
	.post_remove = efx_channel_dummy_op_void,
	.get_name = efx_sriov_get_channel_name,
	/* no copy operation; channel must not be reallocated */
	.keep_eventq = true,
};

void efx_sriov_probe(struct efx_nic *efx)
{
	unsigned count;

	if (!max_vfs)
		return;

	if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
		return;
	if (count > 0 && count > max_vfs)
		count = max_vfs;

	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
	efx->vf_count = count;

	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
}

/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by %local_lock. Drop that lock
 * and then broadcast the address list to every VF.
 */
static void efx_sriov_peer_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
	struct efx_vf *vf;
	struct efx_local_addr *local_addr;
	struct vfdi_endpoint *peer;
	struct efx_endpoint_page *epp;
	struct list_head pages;
	unsigned int peer_space;
	unsigned int peer_count;
	unsigned int pos;

	mutex_lock(&efx->local_lock);

	/* Move the existing peer pages off %local_page_list */
	INIT_LIST_HEAD(&pages);
	list_splice_tail_init(&efx->local_page_list, &pages);

	/* Populate the VF addresses starting from entry 1 (entry 0 is
	 * the PF address)
	 */
	peer = vfdi_status->peers + 1;
	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
	peer_count = 1;
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
			*peer++ = vf->addr;
			++peer_count;
			--peer_space;
			BUG_ON(peer_space == 0);
		}
		mutex_unlock(&vf->status_lock);
	}

	/* Fill the remaining addresses */
	list_for_each_entry(local_addr, &efx->local_addr_list, link) {
		memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
		peer->tci = 0;
		++peer;
		++peer_count;
		if (--peer_space == 0) {
			if (list_empty(&pages)) {
				epp = kmalloc(sizeof(*epp), GFP_KERNEL);
				if (!epp)
					break;
				epp->ptr = dma_alloc_coherent(
					&efx->pci_dev->dev, EFX_PAGE_SIZE,
					&epp->addr, GFP_KERNEL);
				if (!epp->ptr) {
					kfree(epp);
					break;
				}
			} else {
				epp = list_first_entry(
					&pages, struct efx_endpoint_page, link);
				list_del(&epp->link);
			}

			list_add_tail(&epp->link, &efx->local_page_list);
			peer = (struct vfdi_endpoint *)epp->ptr;
			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
		}
	}
	vfdi_status->peer_count = peer_count;
	mutex_unlock(&efx->local_lock);

	/* Free any now unused endpoint pages */
	while (!list_empty(&pages)) {
		epp = list_first_entry(
			&pages, struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}

	/* Finally, push the pages */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->status_addr)
			__efx_sriov_push_vf_status(vf);
		mutex_unlock(&vf->status_lock);
	}
}

static void efx_sriov_free_local(struct efx_nic *efx)
{
	struct efx_local_addr *local_addr;
	struct efx_endpoint_page *epp;

	while (!list_empty(&efx->local_addr_list)) {
		local_addr = list_first_entry(&efx->local_addr_list,
					      struct efx_local_addr, link);
		list_del(&local_addr->link);
		kfree(local_addr);
	}

	while (!list_empty(&efx->local_page_list)) {
		epp = list_first_entry(&efx->local_page_list,
				       struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}
}

static int efx_sriov_vf_alloc(struct efx_nic *efx)
{
	unsigned index;
	struct efx_vf *vf;

	efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
	if (!efx->vf)
		return -ENOMEM;

	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		vf->efx = efx;
		vf->index = index;
		vf->rx_filter_id = -1;
		vf->tx_filter_mode = VF_TX_FILTER_AUTO;
		vf->tx_filter_id = -1;
		INIT_WORK(&vf->req, efx_sriov_vfdi);
		INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
		init_waitqueue_head(&vf->flush_waitq);
		mutex_init(&vf->status_lock);
		mutex_init(&vf->txq_lock);
	}

	return 0;
}

static void efx_sriov_vfs_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		efx_nic_free_buffer(efx, &vf->buf);
		kfree(vf->peer_page_addrs);
		vf->peer_page_addrs = NULL;
		vf->peer_page_count = 0;

		vf->evq0_count = 0;
	}
}

static int efx_sriov_vfs_init(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	unsigned index, devfn, sriov, buftbl_base;
	u16 offset, stride;
	struct efx_vf *vf;
	int rc;

	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov)
		return -ENOENT;

	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

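	/* Per the PCI SR-IOV capability, VF n's requester ID is the PF's
	 * devfn plus VF offset plus n * stride.
	 */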
	buftbl_base = efx->vf_buftbl_base;
	devfn = pci_dev->devfn + offset;
	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		/* Reserve buffer entries */
		vf->buftbl_base = buftbl_base;
		buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);

		vf->pci_rid = devfn;
		snprintf(vf->pci_name, sizeof(vf->pci_name),
			 "%04x:%02x:%02x.%d",
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
		if (rc)
			goto fail;

		devfn += stride;
	}

	return 0;

fail:
	efx_sriov_vfs_fini(efx);
	return rc;
}

int efx_sriov_init(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct vfdi_status *vfdi_status;
	int rc;

	/* Ensure there's room for vf_channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
	/* Ensure that VI_BASE is aligned on VI_SCALE */
	BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));

	if (efx->vf_count == 0)
		return 0;

	rc = efx_sriov_cmd(efx, true, NULL, NULL);
	if (rc)
		goto fail_cmd;

	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
	memset(vfdi_status, 0, sizeof(*vfdi_status));
	vfdi_status->version = 1;
	vfdi_status->length = sizeof(*vfdi_status);
	vfdi_status->max_tx_channels = vf_max_tx_channels;
	vfdi_status->vi_scale = efx->vi_scale;
	vfdi_status->rss_rxq_count = efx->rss_spread;
	vfdi_status->peer_count = 1 + efx->vf_count;
	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;

	rc = efx_sriov_vf_alloc(efx);
	if (rc)
		goto fail_alloc;

	mutex_init(&efx->local_lock);
	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
	INIT_LIST_HEAD(&efx->local_addr_list);
	INIT_LIST_HEAD(&efx->local_page_list);

	rc = efx_sriov_vfs_init(efx);
	if (rc)
		goto fail_vfs;

	rtnl_lock();
	memcpy(vfdi_status->peers[0].mac_addr,
	       net_dev->dev_addr, ETH_ALEN);
	efx->vf_init_count = efx->vf_count;
	rtnl_unlock();

	efx_sriov_usrev(efx, true);

	/* At this point we must be ready to accept VFDI requests */

	rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
	if (rc)
		goto fail_pci;

	netif_info(efx, probe, net_dev,
		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
		   efx->vf_count, efx_vf_size(efx));
	return 0;

fail_pci:
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();
	efx_sriov_vfs_fini(efx);
fail_vfs:
	cancel_work_sync(&efx->peer_work);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
fail_alloc:
	efx_nic_free_buffer(efx, &efx->vfdi_status);
fail_status:
	efx_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
	return rc;
}

void efx_sriov_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	if (efx->vf_init_count == 0)
		return;

	/* Disable all interfaces to prevent further reconfiguration */
	BUG_ON(efx->vfdi_channel->enabled);
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();

	/* Flush all reconfiguration work */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;
		cancel_work_sync(&vf->req);
		cancel_work_sync(&vf->reset_work);
	}
	cancel_work_sync(&efx->peer_work);

	pci_disable_sriov(efx->pci_dev);

	/* Tear down back-end state */
	efx_sriov_vfs_fini(efx);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
	efx_nic_free_buffer(efx, &efx->vfdi_status);
	efx_sriov_cmd(efx, false, NULL, NULL);
}

void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_vf *vf;
	unsigned qid, seq, type, data;

	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);

	/* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
	BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
	seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
	type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
	data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);

	netif_vdbg(efx, hw, efx->net_dev,
		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
		   qid, seq, type, data);

	if (map_vi_index(efx, qid, &vf, NULL))
		return;
	if (vf->busy)
		goto error;

	if (type == VFDI_EV_TYPE_REQ_WORD0) {
		/* Resynchronise */
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->req_seqno = seq + 1;
		vf->req_addr = 0;
	} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
		goto error;

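	/* The 64-bit request address arrives as four 16-bit words
	 * (REQ_WORD0..REQ_WORD3), least-significant first; each word is
	 * shifted into place by 16 * word-index.
	 */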
	switch (vf->req_type) {
	case VFDI_EV_TYPE_REQ_WORD0:
	case VFDI_EV_TYPE_REQ_WORD1:
	case VFDI_EV_TYPE_REQ_WORD2:
		vf->req_addr |= (u64)data << (vf->req_type << 4);
		++vf->req_type;
		return;

	case VFDI_EV_TYPE_REQ_WORD3:
		vf->req_addr |= (u64)data << 48;
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->busy = true;
		queue_work(vfdi_workqueue, &vf->req);
		return;
	}

error:
	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "ERROR: Screaming VFDI request from %s\n",
			  vf->pci_name);
	/* Reset the request and sequence number */
	vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
	vf->req_seqno = seq + 1;
}

void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
	struct efx_vf *vf;

	if (vf_i >= efx->vf_init_count)
		return;
	vf = efx->vf + vf_i;
	netif_info(efx, hw, efx->net_dev,
		   "FLR on VF %s\n", vf->pci_name);

	vf->status_addr = 0;
	efx_vfdi_remove_all_filters(vf);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;
}

void efx_sriov_mac_address_changed(struct efx_nic *efx)
{
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;

	if (!efx->vf_init_count)
		return;
	memcpy(vfdi_status->peers[0].mac_addr,
	       efx->net_dev->dev_addr, ETH_ALEN);
	queue_work(vfdi_workqueue, &efx->peer_work);
}

void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	/* Ignore flush completions triggered by an FLR */
	if (!test_bit(qid, vf->txq_mask))
		return;

	__clear_bit(qid, vf->txq_mask);
	--vf->txq_count;

	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}

void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned ev_failed, queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	ev_failed = EFX_QWORD_FIELD(*event,
				    FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	if (!test_bit(qid, vf->rxq_mask))
		return;

	if (ev_failed) {
		set_bit(qid, vf->rxq_retry_mask);
		atomic_inc(&vf->rxq_retry_count);
	} else {
		__clear_bit(qid, vf->rxq_mask);
		--vf->rxq_count;
	}
	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}

/* Called from napi. Schedule the reset work item */
void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
	struct efx_vf *vf;
	unsigned int rel;

	if (map_vi_index(efx, dmaq, &vf, &rel))
		return;

	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "VF %d DMA Q %d reports descriptor fetch error.\n",
			  vf->index, rel);
	queue_work(vfdi_workqueue, &vf->reset_work);
}

/* Reset all VFs */
void efx_sriov_reset(struct efx_nic *efx)
{
	unsigned int vf_i;
	struct efx_buffer buf;
	struct efx_vf *vf;

	ASSERT_RTNL();

	if (efx->vf_init_count == 0)
		return;

	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);

	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
		vf = efx->vf + vf_i;
		efx_sriov_reset_vf(vf, &buf);
	}

	efx_nic_free_buffer(efx, &buf);
}

int efx_init_sriov(void)
{
	/* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
	 * efx_sriov_peer_work() spend almost all their time sleeping for
	 * MCDI to complete anyway
	 */
	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
	if (!vfdi_workqueue)
		return -ENOMEM;

	return 0;
}

void efx_fini_sriov(void)
{
	destroy_workqueue(vfdi_workqueue);
}

int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}

int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
			  u16 vlan, u8 qos)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
	vf->addr.tci = htons(tci);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}

int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
			      bool spoofchk)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	int rc;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->txq_lock);
	if (vf->txq_count == 0) {
		vf->tx_filter_mode =
			spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
		rc = 0;
	} else {
		/* This cannot be changed while TX queues are running */
		rc = -EBUSY;
	}
	mutex_unlock(&vf->txq_lock);
	return rc;
}

int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
			    struct ifla_vf_info *ivi)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	ivi->vf = vf_i;
	memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
	ivi->tx_rate = 0;
	tci = ntohs(vf->addr.tci);
	ivi->vlan = tci & VLAN_VID_MASK;
	ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
	ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;

	return 0;
}