/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

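/* Begin a VF -> PF request: take the channel mutex, clear both the
 * request and reply mailboxes, and place the request's first TLV.
 * Returns a pointer to that TLV so the caller can fill in the body.
 */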
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released in qed_vf_pf_req_end(), once the PF's
	 * response has been processed.
	 * So, qed_vf_pf_prep() and qed_vf_pf_req_end()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

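/* Conclude a VF -> PF request started by qed_vf_pf_prep() - log the
 * request/reply status pair and release the channel mutex.
 */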
static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

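/* Pass the prepared request to the PF over the HW channel: write the
 * request's DMA address into the USDM VF zone, raise the trigger, then
 * poll `done' (written back by the PF through the reply's DMA address)
 * for up to ~2.5 seconds (100 iterations x 25 msec).
 */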
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent the trigger
	 * from being observed before the data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* Once the PF is done with the response, it writes back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

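/* ACQUIRE may be retried up to VF_ACQUIRE_THRESH times; on each
 * PFVF_STATUS_NO_RESOURCE reply the request is shrunk to the amounts
 * the PF recommended in its response.
 */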
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

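/* Negotiate resources and fastpath-HSI compatibility with the PF. On
 * success the response is cached in p_iov->acquire_resp and basic
 * device information (chip, number of hwfns) is learned from it.
 */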
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible a legacy PF mistakenly
				 * accepted; but we don't care - simply
				 * mark it as legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using the same Major, the PF must
			 * have had its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

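/* First step of a VF's hw-preparation flow - read the opaque/concrete
 * FIDs from BAR0, allocate the DMA-coherent mailboxes and bulletin
 * board, and then attempt the ACQUIRE negotiation with the PF.
 */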
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once the leading hwfn
	 * learns the actual configuration from the PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
					  PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START +	\
				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

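/* Request the PF to start an Rx queue. For modern PFs the producer
 * address is taken from the response; for legacy (pre-fastpath-HSI)
 * PFs it is computed locally from the MSTORM queue zone.
 */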
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}
	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

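/* Request the PF to stop a single Rx queue, optionally asking for a
 * CQE completion.
 */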
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

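/* Request the PF to start a Tx queue and resolve the doorbell address,
 * either from the response offset (modern PFs) or from the legacy DEMS
 * doorbell layout.
 */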
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
					     qed_db_addr_vf(cid,
							    DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

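/* Request the PF to start the VF's vport, passing the MTU, TPA mode,
 * vlan-stripping configuration and the status block addresses.
 */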
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
				   "succeeded" : "failed");
	}
}

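/* Build a VPORT_UPDATE request: each requested change is expressed as
 * an extended TLV appended after the header, and each such TLV is
 * answered by its own response TLV from the PF.
 */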
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

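/* Send CHANNEL_TLV_RELEASE to the PF and free all IOV resources held
 * by this VF - the mailboxes, the bulletin board and vf_iov_info.
 */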
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

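/* Snapshot the bulletin board into a local shadow, verify its CRC, and
 * report via p_change whether a newer valid version was read.
 */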
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

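/* Periodic VF worker - polls the bulletin board for changes and
 * re-schedules itself every second for as long as the VF is active.
 */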
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}