/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
*   copyright notice, this list of conditions and the following
*   disclaimer.
*
* - Redistributions in binary form must reproduce the above
*   copyright notice, this list of conditions and the following
*   disclaimer in the documentation and/or other materials
*   provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"
/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 */
static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
	wmb(); /* make sure WQE is populated before polarity is set */
	set_64bit_val(wqe, 24, header);
}

/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: cqp tail register value
 * @tail: wqtail register value
 * @error: cqp processing err
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
					  u32 *val,
					  u32 *tail,
					  u32 *error)
{
	if (cqp->dev->is_pf) {
		*val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
		*tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
		*error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
	} else {
		*val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
		*tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
		*error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
	}
}

/**
 * i40iw_cqp_poll_registers - poll cqp registers
 * @cqp: struct for cqp hw
 * @tail: wqtail register value
 * @count: how many times to try for completion
 */
static enum i40iw_status_code i40iw_cqp_poll_registers(
					struct i40iw_sc_cqp *cqp,
					u32 tail,
					u32 count)
{
	u32 i = 0;
	u32 newtail, error, val;

	while (i < count) {
		i++;
		i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
		if (error) {
			error = (cqp->dev->is_pf) ?
				 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
				 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			return I40IW_ERR_CQP_COMPL_ERROR;
		}
		if (newtail != tail) {
			/* SUCCESS */
			I40IW_RING_MOVE_TAIL(cqp->sq_ring);
			return 0;
		}
		udelay(I40IW_SLEEP_COUNT);
	}
	return I40IW_ERR_TIMEOUT;
}

/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 * @sd: number of SDs for HMC objects
 *
 * parses fpm commit info and copies base values
 * of hmc objects in hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
				u64 *buf,
				struct i40iw_hmc_obj_info *info,
				u32 *sd)
{
	u64 temp;
	u64 size;
	u64 base = 0;
	u32 i, j;
	u32 k = 0;
	u32 low;

	/* copy base values in obj_info */
	for (i = I40IW_HMC_IW_QP, j = 0;
	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		info[i].base = RS_64_1(temp, 32) * 512;
		if (info[i].base > base) {
			base = info[i].base;
			k = i;
		}
		low = (u32)(temp);
		if (low)
			info[i].cnt = low;
	}
	size = info[k].cnt * info[k].size + info[k].base;
	if (size & 0x1FFFFF)
		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
	else
		*sd = (u32)(size >> 21);

	return 0;
}
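
/*
 * Worked example for the SD arithmetic above (illustrative comment, not
 * driver logic): each HMC segment descriptor (SD) backs a 2MB chunk,
 * i.e. 1 << 21 bytes, so the mask 0x1FFFFF tests for a partial last
 * chunk. A footprint of 5MB (0x500000 bytes) has nonzero low bits,
 * giving *sd = (0x500000 >> 21) + 1 = 2 + 1 = 3 SDs.
 */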

/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copies max_cnt and
 * size values of hmc objects in hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
				u64 *buf,
				struct i40iw_hmc_info *hmc_info,
				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
	u64 temp;
	struct i40iw_hmc_obj_info *obj_info;
	u32 i, j, size;
	u16 max_pe_sds;

	obj_info = hmc_info->hmc_obj;

	get_64bit_val(buf, 0, &temp);
	hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
	max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

	/* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
	if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
		max_pe_sds--;
	hmc_fpm_misc->max_sds = max_pe_sds;
	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

	for (i = I40IW_HMC_IW_QP, j = 8;
	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		if (i == I40IW_HMC_IW_QP)
			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
		else if (i == I40IW_HMC_IW_CQ)
			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
		else
			obj_info[i].max_cnt = (u32)temp;

		size = (u32)RS_64_1(temp, 32);
		obj_info[i].size = ((u64)1 << size);
	}
	for (i = I40IW_HMC_IW_MR, j = 48;
	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		obj_info[i].max_cnt = (u32)temp;
		size = (u32)RS_64_1(temp, 32);
		obj_info[i].size = LS_64_1(1, size);
	}

	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
	get_64bit_val(buf, 64, &temp);
	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
	if (!hmc_fpm_misc->xf_block_size)
		return I40IW_ERR_INVALID_SIZE;
	get_64bit_val(buf, 80, &temp);
	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
	if (!hmc_fpm_misc->q1_block_size)
		return I40IW_ERR_INVALID_SIZE;
	return 0;
}

/**
 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
 * @qs_list: list of qs_handles to be fixed with valid qs_handles
 */
static void i40iw_fill_qos_list(u16 *qs_list)
{
	u16 qshandle = qs_list[0];
	int i;

	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		if (qs_list[i] == QS_HANDLE_UNKNOWN)
			qs_list[i] = qshandle;
		else
			qshandle = qs_list[i];
	}
}
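
/*
 * Illustrative example (comment only, with a 4-entry list for brevity):
 * given qs_list = {5, QS_HANDLE_UNKNOWN, 7, QS_HANDLE_UNKNOWN}, the loop
 * above yields {5, 5, 7, 7}; each unknown slot inherits the most recent
 * valid handle seen while scanning from index 0.
 */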

/**
 * i40iw_qp_from_entry - Given entry, get to the qp structure
 * @entry: Points to list of qp structure
 */
static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
{
	if (!entry)
		return NULL;

	return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
}
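
/*
 * Note (comment only): aside from the NULL check, the pointer arithmetic
 * above is an open-coded container_of(); the same result could be written
 * as list_entry(entry, struct i40iw_sc_qp, list).
 */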

/**
 * i40iw_get_qp - get the next qp from the list given current qp
 * @head: list head of qps
 * @qp: current qp
 */
static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
{
	struct list_head *entry = NULL;
	struct list_head *lastentry;

	if (list_empty(head))
		return NULL;

	if (!qp) {
		entry = head->next;
	} else {
		lastentry = &qp->list;
		entry = (lastentry != head) ? lastentry->next : NULL;
	}

	return i40iw_qp_from_entry(entry);
}

/**
 * i40iw_change_l2params - given the new l2 parameters, change all qps
 * @dev: IWARP device pointer
 * @l2params: new parameters from l2
 */
void i40iw_change_l2params(struct i40iw_sc_dev *dev, struct i40iw_l2params *l2params)
{
	struct i40iw_sc_qp *qp = NULL;
	bool qs_handle_change = false;
	bool mss_change = false;
	unsigned long flags;
	u16 qs_handle;
	int i;

	if (dev->mss != l2params->mss) {
		mss_change = true;
		dev->mss = l2params->mss;
	}

	i40iw_fill_qos_list(l2params->qs_handle_list);
	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		qs_handle = l2params->qs_handle_list[i];
		if (dev->qos[i].qs_handle != qs_handle)
			qs_handle_change = true;
		else if (!mss_change)
			continue; /* no MSS nor qs handle change */
		spin_lock_irqsave(&dev->qos[i].lock, flags);
		qp = i40iw_get_qp(&dev->qos[i].qplist, qp);
		while (qp) {
			if (mss_change)
				i40iw_qp_mss_modify(dev, qp);
			if (qs_handle_change) {
				qp->qs_handle = qs_handle;
				/* issue cqp suspend command */
				i40iw_qp_suspend_resume(dev, qp, true);
			}
			qp = i40iw_get_qp(&dev->qos[i].qplist, qp);
		}
		spin_unlock_irqrestore(&dev->qos[i].lock, flags);
		dev->qos[i].qs_handle = qs_handle;
	}
}

/**
 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
 * @dev: IWARP device pointer
 * @qp: qp to be removed from qos
 */
static void i40iw_qp_rem_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	unsigned long flags;

	if (!qp->on_qoslist)
		return;
	spin_lock_irqsave(&dev->qos[qp->user_pri].lock, flags);
	list_del(&qp->list);
	spin_unlock_irqrestore(&dev->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_qp_add_qos - called during setctx for qp to be added to qos
 * @dev: IWARP device pointer
 * @qp: qp to be added to qos
 */
void i40iw_qp_add_qos(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->qos[qp->user_pri].lock, flags);
	qp->qs_handle = dev->qos[qp->user_pri].qs_handle;
	list_add(&qp->list, &dev->qos[qp->user_pri].qplist);
	qp->on_qoslist = true;
	spin_unlock_irqrestore(&dev->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 */
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
			     struct i40iw_sc_pd *pd,
			     u16 pd_id)
{
	pd->size = sizeof(*pd);
	pd->pd_id = pd_id;
	pd->dev = dev;
}

/**
 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq, srq) to encoded_size
 * @cqpsq: set for cqp sq, whose encoded size is one more than other wqs
 */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
	u8 encoded_size = 0;

	/* cqp sq's hw coded value starts from 1 for size of 4
	 * while it starts from 0 for qp's wqs.
	 */
	if (cqpsq)
		encoded_size = 1;
	wqsize >>= 2;
	while (wqsize >>= 1)
		encoded_size++;
	return encoded_size;
}
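
/*
 * Worked example (comment only): for a cqp sq of 4 entries, 4 >> 2 = 1
 * and the loop never runs, so the encoded size is the initial 1; for a
 * cqp sq of 2048 entries, 2048 >> 2 = 512 shifts down to zero in nine
 * loop iterations, giving 1 + 9 = 10. The same sizes encode as 0 and 9
 * for a qp wq (cqpsq == false).
 */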

/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
						struct i40iw_cqp_init_info *info)
{
	u8 hw_sq_size;

	if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
	    (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
	    ((info->sq_size & (info->sq_size - 1))))
		return I40IW_ERR_INVALID_SIZE;

	hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
	cqp->size = sizeof(*cqp);
	cqp->sq_size = info->sq_size;
	cqp->hw_sq_size = hw_sq_size;
	cqp->sq_base = info->sq;
	cqp->host_ctx = info->host_ctx;
	cqp->sq_pa = info->sq_pa;
	cqp->host_ctx_pa = info->host_ctx_pa;
	cqp->dev = info->dev;
	cqp->struct_ver = info->struct_ver;
	cqp->scratch_array = info->scratch_array;
	cqp->polarity = 0;
	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
	cqp->enabled_vf_count = info->enabled_vf_count;
	cqp->hmc_profile = info->hmc_profile;
	info->dev->cqp = cqp;

	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
		    __func__, cqp->sq_size, cqp->hw_sq_size,
		    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
	return 0;
}

/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
						  u16 *maj_err,
						  u16 *min_err)
{
	u64 temp;
	u32 cnt = 0, p1, p2, val = 0, err_code;
	enum i40iw_status_code ret_code;

	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
					  &cqp->sdbuf,
					  128,
					  I40IW_SD_BUF_ALIGNMENT);

	if (ret_code)
		goto exit;

	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

	set_64bit_val(cqp->host_ctx, 0, temp);
	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
	       LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
	set_64bit_val(cqp->host_ctx, 16, temp);
	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
	set_64bit_val(cqp->host_ctx, 32, 0);
	set_64bit_val(cqp->host_ctx, 40, 0);
	set_64bit_val(cqp->host_ctx, 48, 0);
	set_64bit_val(cqp->host_ctx, 56, 0);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
			cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

	p1 = RS_32_1(cqp->host_ctx_pa, 32);
	p2 = (u32)cqp->host_ctx_pa;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
	}
	do {
		if (cnt++ > I40IW_DONE_COUNT) {
			i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
			ret_code = I40IW_ERR_TIMEOUT;
			/*
			 * read the CQPERRCODES register to get the minor
			 * and major error code
			 */
			if (cqp->dev->is_pf)
				err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
			else
				err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			*min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
			*maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
			goto exit;
		}
		udelay(I40IW_SLEEP_COUNT);
		if (cqp->dev->is_pf)
			val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
		else
			val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
	} while (!val);

exit:
	if (!ret_code)
		cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return ret_code;
}

/**
 * i40iw_sc_cqp_post_sq - post of cqp's sq
 * @cqp: struct for cqp hw
 */
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
{
	if (cqp->dev->is_pf)
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
	else
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));

	i40iw_debug(cqp->dev,
		    I40IW_DEBUG_WQE,
		    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
		    __func__,
		    cqp->sq_ring.head,
		    cqp->sq_ring.tail,
		    cqp->sq_ring.size);
}

/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data saved with the wqe, returned on its completion
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
	u64 *wqe = NULL;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
		i40iw_debug(cqp->dev,
			    I40IW_DEBUG_WQE,
			    "%s: ring is full head %x tail %x size %x\n",
			    __func__,
			    cqp->sq_ring.head,
			    cqp->sq_ring.tail,
			    cqp->sq_ring.size);
		return NULL;
	}
	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	if (!wqe_idx)
		cqp->polarity = !cqp->polarity;

	wqe = cqp->sq_base[wqe_idx].elem;
	cqp->scratch_array[wqe_idx] = scratch;
	I40IW_CQP_INIT_WQE(wqe);

	return wqe;
}
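
/*
 * Note on the polarity flip above (comment only): the valid bit written
 * into each wqe header alternates every time the producer index wraps
 * back to slot 0, so hardware can distinguish freshly posted wqes from
 * stale ones left over from the previous pass around the ring.
 */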

/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
	u32 cnt = 0, val = 1;
	enum i40iw_status_code ret_code = 0;
	u32 cqpstat_addr;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
		cqpstat_addr = I40E_PFPE_CCQPSTATUS;
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
		cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
	}
	do {
		if (cnt++ > I40IW_DONE_COUNT) {
			ret_code = I40IW_ERR_TIMEOUT;
			break;
		}
		udelay(I40IW_SLEEP_COUNT);
		val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
	} while (val);

	i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
	return ret_code;
}

/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_seq_num;

	/* write to cq doorbell shadow area */
	/* arm next se should always be zero */
	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

	wmb(); /* make sure shadow area is updated before arming */

	if (ccq->dev->is_pf)
		i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
	else
		i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}

/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
					struct i40iw_sc_cq *ccq,
					struct i40iw_ccq_cqe_info *info)
{
	u64 qp_ctx, temp, temp1;
	u64 *cqe;
	struct i40iw_sc_cqp *cqp;
	u32 wqe_idx;
	u8 polarity;
	enum i40iw_status_code ret_code = 0;

	if (ccq->cq_uk.avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

	get_64bit_val(cqe, 24, &temp);
	polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
	if (polarity != ccq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	get_64bit_val(cqe, 8, &qp_ctx);
	cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
	info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
	info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	if (info->error) {
		info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
		info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	}
	wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
	info->scratch = cqp->scratch_array[wqe_idx];

	get_64bit_val(cqe, 16, &temp1);
	info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
	info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
	info->cqp = cqp;

	/* move the head for cq */
	I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
	if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
		ccq->cq_uk.polarity ^= 1;

	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
	set_64bit_val(ccq->cq_uk.shadow_area,
		      0,
		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
	wmb(); /* write shadow area before tail */
	I40IW_RING_MOVE_TAIL(cqp->sq_ring);
	return ret_code;
}
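
/*
 * Flow sketch for the consumer side above (comment only): the scratch
 * value stashed by i40iw_sc_cqp_get_next_send_wqe() at post time is
 * recovered here through the completion's wqe index, which is how a
 * caller matches a ccq completion back to the cqp request it issued.
 */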

/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
					struct i40iw_sc_cqp *cqp,
					u8 op_code,
					struct i40iw_ccq_cqe_info *compl_info)
{
	struct i40iw_ccq_cqe_info info;
	struct i40iw_sc_cq *ccq;
	enum i40iw_status_code ret_code = 0;
	u32 cnt = 0;

	memset(&info, 0, sizeof(info));
	ccq = cqp->dev->ccq;
	while (1) {
		if (cnt++ > I40IW_DONE_COUNT)
			return I40IW_ERR_TIMEOUT;

		if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
			udelay(I40IW_SLEEP_COUNT);
			continue;
		}

		if (info.error) {
			ret_code = I40IW_ERR_CQP_COMPL_ERROR;
			break;
		}
		/* warn on opcode mismatch and keep polling */
		if (op_code != info.op_code) {
			i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
				    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
				    __func__, op_code, info.op_code);
		}
		/* success, exit out of the loop */
		if (op_code == info.op_code)
			break;
	}

	if (compl_info)
		memcpy(compl_info, &info, sizeof(*compl_info));

	return ret_code;
}

/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_cqp_manage_push_page_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
		return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, info->qs_handle);

	header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: flag to free the pm function table entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 vf_index,
				bool free_pm_fcn,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	if (vf_index >= I40IW_MAX_VF_PER_PF)
		return I40IW_ERR_INVALID_VF_ID;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_profile_type: type of profile to set
 * @vf_num: vf number for profile
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 hmc_profile_type,
				u8 vf_num, bool post_sq,
				bool poll_registers)
{
	u64 *wqe;
	u64 header;
	u32 val, tail, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16,
		      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
		       LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));

	header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (poll_registers)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
		else
			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
								 NULL);
	}

	return ret_code;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}

/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp cqe completion for fpm commit
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					struct i40iw_dma_mem *commit_fpm_mem,
					bool post_sq,
					u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, commit_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);

		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_commit_fpm_values_done(cqp);
	}

	return ret_code;
}

/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					struct i40iw_dma_mem *query_fpm_mem,
					bool post_sq,
					u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, query_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* read the tail from CQP_TAIL register */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_query_fpm_values_done(cqp);
	}

	return ret_code;
}

/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_add_arp_cache_entry_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 8, info->reach_max);

	temp = info->mac_addr[5] |
	       LS_64_1(info->mac_addr[4], 8) |
	       LS_64_1(info->mac_addr[3], 16) |
	       LS_64_1(info->mac_addr[2], 24) |
	       LS_64_1(info->mac_addr[1], 32) |
	       LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 16, temp);

	header = info->arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
		 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
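
/*
 * Worked example of the MAC packing above (comment only): the six
 * address octets are shifted into a single u64 with mac_addr[0] in the
 * highest populated byte, so mac_addr[] = {0x00, 0x11, 0x22, 0x33,
 * 0x44, 0x55} becomes 0x0000001122334455. The same packing is reused
 * by the qhash and local mac table wqes below.
 */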

/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u16 arp_index,
					bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of the entry to query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u16 arp_index,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_apbvt_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, info->port);

	header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * a listener is created, it is called with entry type of I40IW_QHASH_TYPE_TCP_SYN with the
 * local ip address and tcp port. When a SYN is received (passive connections) or
 * sent (active connections), this routine is called with entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is passed in info.
 *
 * When the iwarp connection is done and its state moves to RTS, the quad hash entry in
 * the hardware will point to iwarp's qp number and requires no calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_qhash_table_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	temp = info->mac_addr[5] |
	       LS_64_1(info->mac_addr[4], 8) |
	       LS_64_1(info->mac_addr[3], 16) |
	       LS_64_1(info->mac_addr[2], 24) |
	       LS_64_1(info->mac_addr[1], 32) |
	       LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 0, temp);

	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
	if (info->ipv4_valid) {
		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
	} else {
		set_64bit_val(wqe,
			      56,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	qw2 = LS_64(cqp->dev->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);
	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe,
				      40,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
		} else {
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
		}
	}

	set_64bit_val(wqe, 8, qw1);
	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

	i40iw_insert_wqe_hdr(wqe, temp);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_local_mac_ipaddr_entry_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	temp = info->mac_addr[5] |
	       LS_64_1(info->mac_addr[4], 8) |
	       LS_64_1(info->mac_addr[3], 16) |
	       LS_64_1(info->mac_addr[2], 24) |
	       LS_64_1(info->mac_addr[1], 32) |
	       LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 32, temp);

	header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address delete
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 entry_idx,
				u8 ignore_ref_count,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_cqp_nop - send a nop wqe
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
					       u64 scratch,
					       bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_ceq_init - initialize ceq
 * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
						struct i40iw_ceq_init_info *info)
{
	u32 pble_obj_cnt;

	if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
	    (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
		return I40IW_ERR_INVALID_SIZE;

	if (info->ceq_id >= I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	ceq->size = sizeof(*ceq);
	ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
	ceq->ceq_id = info->ceq_id;
	ceq->dev = info->dev;
	ceq->elem_cnt = info->elem_cnt;
	ceq->ceq_elem_pa = info->ceqe_pa;
	ceq->virtual_map = info->virtual_map;

	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);

	ceq->tph_en = info->tph_en;
	ceq->tph_val = info->tph_val;
	ceq->polarity = 1;
	I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
	ceq->dev->ceq[info->ceq_id] = ceq;

	return 0;
}

/**
 * i40iw_sc_ceq_create - create ceq wqe
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
	set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = ceq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
}

/**
 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = ceq->dev->cqp;
	cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
}

/**
 * i40iw_sc_cceq_create - create cceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
{
	enum i40iw_status_code ret_code;

	ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
	if (!ret_code)
		ret_code = i40iw_sc_cceq_create_done(ceq);
	return ret_code;
}

/**
 * i40iw_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	u64 temp;
	u64 *ceqe;
	struct i40iw_sc_cq *cq = NULL;
	u8 polarity;

	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
	get_64bit_val(ceqe, 0, &temp);
	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
	if (polarity != ceq->polarity)
		return cq;

	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
		ceq->polarity ^= 1;

	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

	return cq;
}
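
/*
 * Note on the pointer recovery above (comment only, as the shift math
 * suggests): the ceqe holds the cq context address right-shifted by one
 * bit alongside the valid flag, so LS_64_1(temp, 1) shifts left by one
 * to rebuild the struct i40iw_sc_cq address, dropping the valid bit,
 * before the cq is acknowledged via CQACK.
 */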

/**
 * i40iw_sc_aeq_init - initialize aeq
 * @aeq: aeq structure ptr
 * @info: aeq initialization info
 */
static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
						struct i40iw_aeq_init_info *info)
{
	u32 pble_obj_cnt;

	if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
	    (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
		return I40IW_ERR_INVALID_SIZE;
	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	aeq->size = sizeof(*aeq);
	aeq->polarity = 1;
	aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
	aeq->dev = info->dev;
	aeq->elem_cnt = info->elem_cnt;

	aeq->aeq_elem_pa = info->aeq_elem_pa;
	I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);

	aeq->virtual_map = info->virtual_map;
	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
	info->dev->aeq = aeq;
	return 0;
}

/**
 * i40iw_sc_aeq_create - create aeq
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
						  u64 scratch,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 32,
		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
	set_64bit_val(wqe, 48,
		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

	header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_aeq_destroy - destroy aeq during close
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
						   u64 scratch,
						   bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
	header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
						     struct i40iw_aeqe_info *info)
{
	u64 temp, compl_ctx;
	u64 *aeqe;
	u16 wqe_idx;
	u8 ae_src;
	u8 polarity;

	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
	get_64bit_val(aeqe, 0, &compl_ctx);
	get_64bit_val(aeqe, 8, &temp);
	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

	if (aeq->polarity != polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
	switch (ae_src) {
	case I40IW_AE_SOURCE_RQ:
	case I40IW_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_CQ:
	case I40IW_AE_SOURCE_CQ_0110:
	case I40IW_AE_SOURCE_CQ_1010:
	case I40IW_AE_SOURCE_CQ_1110:
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case I40IW_AE_SOURCE_SQ:
	case I40IW_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_IN_RR_WR:
	case I40IW_AE_SOURCE_IN_RR_WR_1011:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case I40IW_AE_SOURCE_OUT_RR:
	case I40IW_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	default:
		break;
	}
	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
		aeq->polarity ^= 1;
	return 0;
}

/**
 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
 * @dev: sc device struct
 * @count: allocate count
 */
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
							  u32 count)
{
	if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
		return I40IW_ERR_INVALID_SIZE;

	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);

	return 0;
}

/**
 * i40iw_sc_aeq_create_done - poll for aeq create to complete
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = aeq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
}

/**
 * i40iw_sc_aeq_destroy_done - poll for destroy of aeq during close
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = aeq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
}

/**
 * i40iw_sc_ccq_init - initialize control cq
 * @cq: sc's cq struct
 * @info: info for control cq initialization
 */
static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
						struct i40iw_ccq_init_info *info)
{
	u32 pble_obj_cnt;

	if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
		return I40IW_ERR_INVALID_SIZE;

	if (info->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cq->cq_pa = info->cq_pa;
	cq->cq_uk.cq_base = info->cq_base;
	cq->shadow_area_pa = info->shadow_area_pa;
	cq->cq_uk.shadow_area = info->shadow_area;
	cq->shadow_read_threshold = info->shadow_read_threshold;
	cq->dev = info->dev;
	cq->ceq_id = info->ceq_id;
	cq->cq_uk.cq_size = info->num_elem;
	cq->cq_type = I40IW_CQ_TYPE_CQP;
	cq->ceqe_mask = info->ceqe_mask;
	I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);

	cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
	cq->ceq_id_valid = info->ceq_id_valid;
	cq->tph_en = info->tph_en;
	cq->tph_val = info->tph_val;
	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;

	cq->pbl_list = info->pbl_list;
	cq->virtual_map = info->virtual_map;
	cq->pbl_chunk_size = info->pbl_chunk_size;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
	cq->cq_uk.polarity = true;

	/* following are only for iw cqs so initialize them to zero */
	cq->cq_uk.cqe_alloc_reg = NULL;
	info->dev->ccq = cq;
	return 0;
}
1844
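/*
 * The control CQ (ccq) is the queue on which CQP operation completions
 * are reported and is always CQ id 0.  i40iw_sc_ccq_init() only fills
 * in the software struct and hooks it into dev->ccq; no CQP command is
 * issued until i40iw_sc_ccq_create() below.
 */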
1845/**
1846 * i40iw_sc_ccq_create_done - poll cqp for ccq create
1847 * @ccq: ccq sc struct
1848 */
1849static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
1850{
1851 struct i40iw_sc_cqp *cqp;
1852
1853 cqp = ccq->dev->cqp;
1854 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
1855}
1856
1857/**
1858 * i40iw_sc_ccq_create - create control cq
1859 * @ccq: ccq sc struct
1860 * @scratch: u64 saved to be used during cqp completion
1861 * @check_overflow: overflow flag for ccq
1862 * @post_sq: flag for cqp db to ring
1863 */
1864static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
1865 u64 scratch,
1866 bool check_overflow,
1867 bool post_sq)
1868{
1869 u64 *wqe;
1870 struct i40iw_sc_cqp *cqp;
1871 u64 header;
1872 enum i40iw_status_code ret_code;
1873
1874 cqp = ccq->dev->cqp;
1875 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1876 if (!wqe)
1877 return I40IW_ERR_RING_FULL;
1878 set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
1879 set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
1880 set_64bit_val(wqe, 16,
1881 LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
1882 set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
1883 set_64bit_val(wqe, 40, ccq->shadow_area_pa);
1884 set_64bit_val(wqe, 48,
1885 (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
1886 set_64bit_val(wqe, 56,
1887 LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
1888
1889 header = ccq->cq_uk.cq_id |
1890 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1891 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
1892 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
1893 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
1894 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
1895 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1896 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1897 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
1898 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1899 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1900
1901 i40iw_insert_wqe_hdr(wqe, header);
1902
1903 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
1904 wqe, I40IW_CQP_WQE_SIZE * 8);
1905
1906 if (post_sq) {
1907 i40iw_sc_cqp_post_sq(cqp);
1908 ret_code = i40iw_sc_ccq_create_done(ccq);
1909 if (ret_code)
1910 return ret_code;
1911 }
1912 cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
1913
1914 return 0;
1915}
1916
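/*
 * WQE layout note: the payload quadwords are set first (byte offsets 0,
 * 8, 16, 32, 40, 48 and 56); the header at offset 24, which carries the
 * WQEVALID polarity bit, is written last via i40iw_insert_wqe_hdr() so
 * the hardware never sees a valid bit ahead of a fully built WQE.
 */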
1917/**
1918 * i40iw_sc_ccq_destroy - destroy ccq during close
1919 * @ccq: ccq sc struct
1920 * @scratch: u64 saved to be used during cqp completion
1921 * @post_sq: flag for cqp db to ring
1922 */
1923static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
1924 u64 scratch,
1925 bool post_sq)
1926{
1927 struct i40iw_sc_cqp *cqp;
1928 u64 *wqe;
1929 u64 header;
1930 enum i40iw_status_code ret_code = 0;
1931 u32 tail, val, error;
1932
1933 cqp = ccq->dev->cqp;
1934 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1935 if (!wqe)
1936 return I40IW_ERR_RING_FULL;
1937 set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
1938 set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
1939 set_64bit_val(wqe, 40, ccq->shadow_area_pa);
1940
1941 header = ccq->cq_uk.cq_id |
1942 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1943 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
1944 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1945 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1946 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
1947 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1948 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1949
1950 i40iw_insert_wqe_hdr(wqe, header);
1951
1952 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
1953 wqe, I40IW_CQP_WQE_SIZE * 8);
1954
1955 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1956 if (error)
1957 return I40IW_ERR_CQP_COMPL_ERROR;
1958
1959 if (post_sq) {
1960 i40iw_sc_cqp_post_sq(cqp);
1961 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
1962 }
1963
1964 return ret_code;
1965}
1966
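/*
 * Unlike most CQP commands, ccq destroy cannot report its completion on
 * the ccq itself (that is the queue being torn down), so the routine
 * snapshots the CQP tail first and then detects completion by register
 * polling via i40iw_cqp_poll_registers().
 */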
1967/**
1968 * i40iw_sc_cq_init - initialize completion q
1969 * @cq: cq struct
1970 * @info: cq initialization info
1971 */
1972static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
1973 struct i40iw_cq_init_info *info)
1974{
1975 u32 __iomem *cqe_alloc_reg = NULL;
1976 enum i40iw_status_code ret_code;
1977 u32 pble_obj_cnt;
1978 u32 arm_offset;
1979
1980 pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1981
1982 if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1983 return I40IW_ERR_INVALID_PBLE_INDEX;
1984
1985 cq->cq_pa = info->cq_base_pa;
1986 cq->dev = info->dev;
1987 cq->ceq_id = info->ceq_id;
1988 arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
1989 if (i40iw_get_hw_addr(cq->dev))
1990 cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
1991 arm_offset);
1992 info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
1993 ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
1994 if (ret_code)
1995 return ret_code;
1996 cq->virtual_map = info->virtual_map;
1997 cq->pbl_chunk_size = info->pbl_chunk_size;
1998 cq->ceqe_mask = info->ceqe_mask;
1999 cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
2000
2001 cq->shadow_area_pa = info->shadow_area_pa;
2002 cq->shadow_read_threshold = info->shadow_read_threshold;
2003
2004 cq->ceq_id_valid = info->ceq_id_valid;
2005 cq->tph_en = info->tph_en;
2006 cq->tph_val = info->tph_val;
2007
2008 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2009
2010 return 0;
2011}
2012
2013/**
2014 * i40iw_sc_cq_create - create completion q
2015 * @cq: cq struct
2016 * @scratch: u64 saved to be used during cqp completion
2017 * @check_overflow: flag for overflow check
2018 * @post_sq: flag for cqp db to ring
2019 */
2020static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
2021 u64 scratch,
2022 bool check_overflow,
2023 bool post_sq)
2024{
2025 u64 *wqe;
2026 struct i40iw_sc_cqp *cqp;
2027 u64 header;
2028
2029 if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
2030 return I40IW_ERR_INVALID_CQ_ID;
2031
2032 if (cq->ceq_id > I40IW_MAX_CEQID)
2033 return I40IW_ERR_INVALID_CEQ_ID;
2034
2035 cqp = cq->dev->cqp;
2036 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2037 if (!wqe)
2038 return I40IW_ERR_RING_FULL;
2039
2040 set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2041 set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2042 set_64bit_val(wqe,
2043 16,
2044 LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2045
2046 set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2047
2048 set_64bit_val(wqe, 40, cq->shadow_area_pa);
2049 set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2050 set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2051
2052 header = cq->cq_uk.cq_id |
2053 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2054 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
2055 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2056 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2057 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2058 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2059 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2060 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2061 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2062 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2063
2064 i40iw_insert_wqe_hdr(wqe, header);
2065
2066 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
2067 wqe, I40IW_CQP_WQE_SIZE * 8);
2068
2069 if (post_sq)
2070 i40iw_sc_cqp_post_sq(cqp);
2071 return 0;
2072}
2073
2074/**
2075 * i40iw_sc_cq_destroy - destroy completion q
2076 * @cq: cq struct
2077 * @scratch: u64 saved to be used during cqp completion
2078 * @post_sq: flag for cqp db to ring
2079 */
2080static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
2081 u64 scratch,
2082 bool post_sq)
2083{
2084 struct i40iw_sc_cqp *cqp;
2085 u64 *wqe;
2086 u64 header;
2087
2088 cqp = cq->dev->cqp;
2089 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2090 if (!wqe)
2091 return I40IW_ERR_RING_FULL;
2092 set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2093 set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2094 set_64bit_val(wqe, 40, cq->shadow_area_pa);
2095 set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2096
2097 header = cq->cq_uk.cq_id |
2098 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2099 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
2100 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2101 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2102 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2103 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2104 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2105 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2106 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2107
2108 i40iw_insert_wqe_hdr(wqe, header);
2109
2110 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
2111 wqe, I40IW_CQP_WQE_SIZE * 8);
2112
2113 if (post_sq)
2114 i40iw_sc_cqp_post_sq(cqp);
2115 return 0;
2116}
2117
2118/**
2119 * i40iw_sc_cq_modify - modify completion q
2120 * @cq: cq struct
2121 * @info: modification info struct
2122 * @scratch: u64 saved to be used during cqp completion
2123 * @post_sq: flag to post to sq
2124 */
2125static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
2126 struct i40iw_modify_cq_info *info,
2127 u64 scratch,
2128 bool post_sq)
2129{
2130 struct i40iw_sc_cqp *cqp;
2131 u64 *wqe;
2132 u64 header;
2133 u32 cq_size, ceq_id, first_pm_pbl_idx;
2134 u8 pbl_chunk_size;
2135 bool virtual_map, ceq_id_valid, check_overflow;
2136 u32 pble_obj_cnt;
2137
2138 if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
2139 return I40IW_ERR_INVALID_CEQ_ID;
2140
2141 pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2142
2143 if (info->cq_resize && info->virtual_map &&
2144 (info->first_pm_pbl_idx >= pble_obj_cnt))
2145 return I40IW_ERR_INVALID_PBLE_INDEX;
2146
2147 cqp = cq->dev->cqp;
2148 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2149 if (!wqe)
2150 return I40IW_ERR_RING_FULL;
2151
2152 cq->pbl_list = info->pbl_list;
2153 cq->cq_pa = info->cq_pa;
2154 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2155
2156 cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
2157 if (info->ceq_change) {
2158 ceq_id_valid = true;
2159 ceq_id = info->ceq_id;
2160 } else {
2161 ceq_id_valid = cq->ceq_id_valid;
2162 ceq_id = ceq_id_valid ? cq->ceq_id : 0;
2163 }
2164 virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
2165 first_pm_pbl_idx = (info->cq_resize ?
2166 (info->virtual_map ? info->first_pm_pbl_idx : 0) :
2167 (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2168 pbl_chunk_size = (info->cq_resize ?
2169 (info->virtual_map ? info->pbl_chunk_size : 0) :
2170 (cq->virtual_map ? cq->pbl_chunk_size : 0));
2171 check_overflow = info->check_overflow_change ? info->check_overflow :
2172 cq->check_overflow;
2173 cq->cq_uk.cq_size = cq_size;
2174 cq->ceq_id_valid = ceq_id_valid;
2175 cq->ceq_id = ceq_id;
2176 cq->virtual_map = virtual_map;
2177 cq->first_pm_pbl_idx = first_pm_pbl_idx;
2178 cq->pbl_chunk_size = pbl_chunk_size;
2179 cq->check_overflow = check_overflow;
2180
2181 set_64bit_val(wqe, 0, cq_size);
2182 set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2183 set_64bit_val(wqe, 16,
2184 LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2185 set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2186 set_64bit_val(wqe, 40, cq->shadow_area_pa);
2187 set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
2188 set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2189
2190 header = cq->cq_uk.cq_id |
2191 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
2192 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
2193 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
2194 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2195 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2196 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2197 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2198 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2199 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2200 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2201 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2202
2203 i40iw_insert_wqe_hdr(wqe, header);
2204
2205 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
2206 wqe, I40IW_CQP_WQE_SIZE * 8);
2207
2208 if (post_sq)
2209 i40iw_sc_cqp_post_sq(cqp);
2210 return 0;
2211}
2212
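/*
 * Resolution rules used above: the resize-related fields (cq_size,
 * virtual_map, pbl_chunk_size, first_pm_pbl_idx) are taken from @info
 * only when info->cq_resize is set, ceq_id only when info->ceq_change
 * is set, and check_overflow only when info->check_overflow_change is
 * set; everything else is carried over from the current @cq state,
 * which is updated before the WQE is posted.
 */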
2213/**
2214 * i40iw_sc_qp_init - initialize qp
2215 * @qp: sc qp
2216 * @info: initialization qp info
2217 */
2218static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
2219 struct i40iw_qp_init_info *info)
2220{
2221 u32 __iomem *wqe_alloc_reg = NULL;
2222 enum i40iw_status_code ret_code;
2223 u32 pble_obj_cnt;
2224 u8 wqe_size;
2225 u32 offset;
2226
2227 qp->dev = info->pd->dev;
2228 qp->sq_pa = info->sq_pa;
2229 qp->rq_pa = info->rq_pa;
2230 qp->hw_host_ctx_pa = info->host_ctx_pa;
2231 qp->q2_pa = info->q2_pa;
2232 qp->shadow_area_pa = info->shadow_area_pa;
2233
2234 qp->q2_buf = info->q2;
2235 qp->pd = info->pd;
2236 qp->hw_host_ctx = info->host_ctx;
2237 offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
2238 if (i40iw_get_hw_addr(qp->pd->dev))
2239 wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
2240 offset);
2241
2242 info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
2243 ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
2244 if (ret_code)
2245 return ret_code;
2246 qp->virtual_map = info->virtual_map;
2247
2248 pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2249
2250 if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
2251 (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
2252 return I40IW_ERR_INVALID_PBLE_INDEX;
2253
2254 qp->llp_stream_handle = (void *)(-1);
2255 qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
2256
2257 qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
2258 false);
2259 i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
2260 __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
2261 ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
2262 &wqe_size);
2263 if (ret_code)
2264 return ret_code;
2265 qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
2266 (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
2267 i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
2268 "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
2269 __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
2270 qp->sq_tph_val = info->sq_tph_val;
2271 qp->rq_tph_val = info->rq_tph_val;
2272 qp->sq_tph_en = info->sq_tph_en;
2273 qp->rq_tph_en = info->rq_tph_en;
2274 qp->rcv_tph_en = info->rcv_tph_en;
2275 qp->xmit_tph_en = info->xmit_tph_en;
2276	qp->qs_handle = qp->pd->dev->qos[qp->user_pri].qs_handle;
2277	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
2278
2279 return 0;
2280}
2281
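/*
 * hw_sq_size and hw_rq_size hold encoded ring sizes as expected by the
 * QP context (I40IWQPC_SQSIZE/I40IWQPC_RQSIZE), not raw element counts;
 * the RQ count is first scaled by its WQE size in units of
 * I40IW_QP_WQE_MIN_SIZE before being encoded.
 */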
2282/**
2283 * i40iw_sc_qp_create - create qp
2284 * @qp: sc qp
2285 * @info: qp create info
2286 * @scratch: u64 saved to be used during cqp completion
2287 * @post_sq: flag for cqp db to ring
2288 */
2289static enum i40iw_status_code i40iw_sc_qp_create(
2290 struct i40iw_sc_qp *qp,
2291 struct i40iw_create_qp_info *info,
2292 u64 scratch,
2293 bool post_sq)
2294{
2295 struct i40iw_sc_cqp *cqp;
2296 u64 *wqe;
2297 u64 header;
2298
2299 if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
2300 (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
2301 return I40IW_ERR_INVALID_QP_ID;
2302
2303 cqp = qp->pd->dev->cqp;
2304 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2305 if (!wqe)
2306 return I40IW_ERR_RING_FULL;
2307
2308 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2309
2310 set_64bit_val(wqe, 40, qp->shadow_area_pa);
2311
2312 header = qp->qp_uk.qp_id |
2313 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
2314 LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
2315 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2316 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2317 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2318 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2319 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2320 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2321 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2322 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2323
2324 i40iw_insert_wqe_hdr(wqe, header);
2325 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2326 wqe, I40IW_CQP_WQE_SIZE * 8);
2327
2328 if (post_sq)
2329 i40iw_sc_cqp_post_sq(cqp);
2330 return 0;
2331}
2332
2333/**
2334 * i40iw_sc_qp_modify - modify qp cqp wqe
2335 * @qp: sc qp
2336 * @info: modify qp info
2337 * @scratch: u64 saved to be used during cqp completion
2338 * @post_sq: flag for cqp db to ring
2339 */
2340static enum i40iw_status_code i40iw_sc_qp_modify(
2341 struct i40iw_sc_qp *qp,
2342 struct i40iw_modify_qp_info *info,
2343 u64 scratch,
2344 bool post_sq)
2345{
2346 u64 *wqe;
2347 struct i40iw_sc_cqp *cqp;
2348 u64 header;
2349 u8 term_actions = 0;
2350 u8 term_len = 0;
2351
2352 cqp = qp->pd->dev->cqp;
2353 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2354 if (!wqe)
2355 return I40IW_ERR_RING_FULL;
2356 if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
2357 if (info->dont_send_fin)
2358 term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
2359 if (info->dont_send_term)
2360 term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
2361 if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
2362 (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
2363 term_len = info->termlen;
2364 }
2365
2366 set_64bit_val(wqe,
2367 8,
2368 LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
2369 LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
2370
2371 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2372 set_64bit_val(wqe, 40, qp->shadow_area_pa);
2373
2374 header = qp->qp_uk.qp_id |
2375 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
2376 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
2377 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2378 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
2379 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2380 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2381 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
2382 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2383 LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
2384 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2385 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2386 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
2387 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
2388 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2389 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2390 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2391
2392 i40iw_insert_wqe_hdr(wqe, header);
2393
2394 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
2395 wqe, I40IW_CQP_WQE_SIZE * 8);
2396
2397 if (post_sq)
2398 i40iw_sc_cqp_post_sq(cqp);
2399 return 0;
2400}
2401
2402/**
2403 * i40iw_sc_qp_destroy - cqp destroy qp
2404 * @qp: sc qp
2405 * @scratch: u64 saved to be used during cqp completion
2406 * @remove_hash_idx: flag to remove hash idx
2407 * @ignore_mw_bnd: memory window bind flag
2408 * @post_sq: flag for cqp db to ring
2409 */
2410static enum i40iw_status_code i40iw_sc_qp_destroy(
2411 struct i40iw_sc_qp *qp,
2412 u64 scratch,
2413 bool remove_hash_idx,
2414 bool ignore_mw_bnd,
2415 bool post_sq)
2416{
2417 u64 *wqe;
2418 struct i40iw_sc_cqp *cqp;
2419 u64 header;
2420
2421	i40iw_qp_rem_qos(qp->pd->dev, qp);
2422	cqp = qp->pd->dev->cqp;
2423 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2424 if (!wqe)
2425 return I40IW_ERR_RING_FULL;
2426 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2427 set_64bit_val(wqe, 40, qp->shadow_area_pa);
2428
2429 header = qp->qp_uk.qp_id |
2430 LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
2431 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2432 LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
2433 LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2434 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2435
2436 i40iw_insert_wqe_hdr(wqe, header);
2437 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2438 wqe, I40IW_CQP_WQE_SIZE * 8);
2439
2440 if (post_sq)
2441 i40iw_sc_cqp_post_sq(cqp);
2442 return 0;
2443}
2444
2445/**
2446 * i40iw_sc_qp_flush_wqes - flush qp's wqe
2447 * @qp: sc qp
2448 * @info: flush information
2449 * @scratch: u64 saved to be used during cqp completion
2450 * @post_sq: flag for cqp db to ring
2451 */
2452static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
2453 struct i40iw_sc_qp *qp,
2454 struct i40iw_qp_flush_info *info,
2455 u64 scratch,
2456 bool post_sq)
2457{
2458 u64 temp = 0;
2459 u64 *wqe;
2460 struct i40iw_sc_cqp *cqp;
2461 u64 header;
2462 bool flush_sq = false, flush_rq = false;
2463
2464 if (info->rq && !qp->flush_rq)
2465 flush_rq = true;
2466
2467 if (info->sq && !qp->flush_sq)
2468 flush_sq = true;
2469
2470 qp->flush_sq |= flush_sq;
2471 qp->flush_rq |= flush_rq;
2472 if (!flush_sq && !flush_rq) {
2473 if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
2474 return 0;
2475 }
2476
2477 cqp = qp->pd->dev->cqp;
2478 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2479 if (!wqe)
2480 return I40IW_ERR_RING_FULL;
2481 if (info->userflushcode) {
2482 if (flush_rq) {
2483 temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
2484 LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
2485 }
2486 if (flush_sq) {
2487 temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
2488 LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
2489 }
2490 }
2491 set_64bit_val(wqe, 16, temp);
2492
2493 temp = (info->generate_ae) ?
2494 info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
2495
2496 set_64bit_val(wqe, 8, temp);
2497
2498 header = qp->qp_uk.qp_id |
2499 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
2500 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
2501 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
2502 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
2503 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
2504 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2505
2506 i40iw_insert_wqe_hdr(wqe, header);
2507
2508 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
2509 wqe, I40IW_CQP_WQE_SIZE * 8);
2510
2511 if (post_sq)
2512 i40iw_sc_cqp_post_sq(cqp);
2513 return 0;
2514}
2515
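/*
 * Flush requests are latched in qp->flush_sq/qp->flush_rq so that each
 * queue is flushed at most once; a repeat call becomes a no-op unless
 * it carries the MPA CRC error AE code, which is still allowed through
 * so the async event can be generated.
 */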
2516/**
2517 * i40iw_sc_qp_upload_context - upload qp's context
2518 * @dev: sc device struct
2519 * @info: upload context info ptr for return
2520 * @scratch: u64 saved to be used during cqp completion
2521 * @post_sq: flag for cqp db to ring
2522 */
2523static enum i40iw_status_code i40iw_sc_qp_upload_context(
2524 struct i40iw_sc_dev *dev,
2525 struct i40iw_upload_context_info *info,
2526 u64 scratch,
2527 bool post_sq)
2528{
2529 u64 *wqe;
2530 struct i40iw_sc_cqp *cqp;
2531 u64 header;
2532
2533 cqp = dev->cqp;
2534 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2535 if (!wqe)
2536 return I40IW_ERR_RING_FULL;
2537 set_64bit_val(wqe, 16, info->buf_pa);
2538
2539 header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
2540 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
2541 LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
2542 LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
2543 LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
2544 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2545
2546 i40iw_insert_wqe_hdr(wqe, header);
2547
2548 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
2549 wqe, I40IW_CQP_WQE_SIZE * 8);
2550
2551 if (post_sq)
2552 i40iw_sc_cqp_post_sq(cqp);
2553 return 0;
2554}
2555
2556/**
2557 * i40iw_sc_qp_setctx - set qp's context
2558 * @qp: sc qp
2559 * @qp_ctx: context ptr
2560 * @info: ctx info
2561 */
2562static enum i40iw_status_code i40iw_sc_qp_setctx(
2563 struct i40iw_sc_qp *qp,
2564 u64 *qp_ctx,
2565 struct i40iw_qp_host_ctx_info *info)
2566{
2567 struct i40iwarp_offload_info *iw;
2568 struct i40iw_tcp_offload_info *tcp;
2569 u64 qw0, qw3, qw7 = 0;
2570
2571 iw = info->iwarp_info;
2572 tcp = info->tcp_info;
2573	if (info->add_to_qoslist) {
2574 qp->user_pri = info->user_pri;
2575 i40iw_qp_add_qos(qp->pd->dev, qp);
2576 i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
2577 __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
2578 }
2579	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
2580 LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
2581 LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
2582 LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
2583 LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
2584 LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
2585 LS_64(info->push_idx, I40IWQPC_PPIDX) |
2586 LS_64(info->push_mode_en, I40IWQPC_PMENA);
2587
2588 set_64bit_val(qp_ctx, 8, qp->sq_pa);
2589 set_64bit_val(qp_ctx, 16, qp->rq_pa);
2590
2591 qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2592 LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
2593 LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
2594
2595 set_64bit_val(qp_ctx,
2596 128,
2597 LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
2598
2599 set_64bit_val(qp_ctx,
2600 136,
2601 LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
2602 LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
2603
2604 set_64bit_val(qp_ctx,
2605 168,
2606 LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
2607 set_64bit_val(qp_ctx,
2608 176,
2609 LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
2610 LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
2611 LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
2612 LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
2613
2614 if (info->iwarp_info_valid) {
2615 qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
2616 LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
2617
2618 qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
2619 set_64bit_val(qp_ctx, 144, qp->q2_pa);
2620 set_64bit_val(qp_ctx,
2621 152,
2622 LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
2623
2624		set_64bit_val(qp_ctx,
2625 160,
2626 LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
2627 LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
2628 LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
2629 LS_64(iw->rd_enable, I40IWQPC_RDOK) |
2630 LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
2631 LS_64(iw->bind_en, I40IWQPC_BINDEN) |
2632 LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
2633 LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
2634 LS_64(1, I40IWQPC_IWARPMODE) |
2635 LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
2636 LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
2637 LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
2638 LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
2639 LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
2640 }
2641 if (info->tcp_info_valid) {
2642 qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
2643 LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
2644 LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
2645 LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
2646 LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
2647 LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
2648 LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
2649
2650 qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
2651 LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2652 LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
2653 LS_64(tcp->tos, I40IWQPC_TOS) |
2654 LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
2655 LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
2656
2657 qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
2658 set_64bit_val(qp_ctx,
2659 32,
2660 LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
2661 LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
2662
2663 set_64bit_val(qp_ctx,
2664 40,
2665 LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
2666 LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
2667
2668 set_64bit_val(qp_ctx,
2669 48,
2670 LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
2671 LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
2672 LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
2673
2674 qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
2675 LS_64(tcp->wscale, I40IWQPC_WSCALE) |
2676 LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
2677 LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
2678 LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
2679 LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
2680 LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
2681
2682 set_64bit_val(qp_ctx,
2683 72,
2684 LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
2685 LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
2686 set_64bit_val(qp_ctx,
2687 80,
2688 LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
2689 LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
2690
2691 set_64bit_val(qp_ctx,
2692 88,
2693 LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
2694 LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
2695 set_64bit_val(qp_ctx,
2696 96,
2697 LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
2698 LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
2699 set_64bit_val(qp_ctx,
2700 104,
2701 LS_64(tcp->srtt, I40IWQPC_SRTT) |
2702 LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
2703 set_64bit_val(qp_ctx,
2704 112,
2705 LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
2706 LS_64(tcp->cwnd, I40IWQPC_CWND));
2707 set_64bit_val(qp_ctx,
2708 120,
2709 LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
2710 LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
2711 set_64bit_val(qp_ctx,
2712 128,
2713 LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
2714 LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
2715 set_64bit_val(qp_ctx,
2716 184,
2717 LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
2718 LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
2719 set_64bit_val(qp_ctx,
2720 192,
2721 LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
2722 LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
2723 }
2724
2725 set_64bit_val(qp_ctx, 0, qw0);
2726 set_64bit_val(qp_ctx, 24, qw3);
2727 set_64bit_val(qp_ctx, 56, qw7);
2728
2729	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
2730 qp_ctx, I40IW_QP_CTX_SIZE);
2731 return 0;
2732}
2733
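/*
 * Context layout note: qw0, qw3 and qw7 accumulate bits from the base,
 * iwarp and tcp sections and are written to the context (offsets 0, 24
 * and 56) only at the end, so the optional sections can OR in their
 * fields without a read-modify-write of the DMA buffer.
 */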
2734/**
2735 * i40iw_sc_alloc_stag - mr stag alloc
2736 * @dev: sc device struct
2737 * @info: stag info
2738 * @scratch: u64 saved to be used during cqp completion
2739 * @post_sq: flag for cqp db to ring
2740 */
2741static enum i40iw_status_code i40iw_sc_alloc_stag(
2742 struct i40iw_sc_dev *dev,
2743 struct i40iw_allocate_stag_info *info,
2744 u64 scratch,
2745 bool post_sq)
2746{
2747 u64 *wqe;
2748 struct i40iw_sc_cqp *cqp;
2749 u64 header;
2750	enum i40iw_page_size page_size;
2751
2752	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2753	cqp = dev->cqp;
2754 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2755 if (!wqe)
2756 return I40IW_ERR_RING_FULL;
2757 set_64bit_val(wqe,
2758 8,
2759 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
2760 LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
2761 set_64bit_val(wqe,
2762 16,
2763 LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2764 set_64bit_val(wqe,
2765 40,
2766 LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
2767
2768 header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2769 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2770 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2771 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2772		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2773		 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2774 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2775 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2776 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2777
2778 i40iw_insert_wqe_hdr(wqe, header);
2779
2780 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
2781 wqe, I40IW_CQP_WQE_SIZE * 8);
2782
2783 if (post_sq)
2784 i40iw_sc_cqp_post_sq(cqp);
2785 return 0;
2786}
2787
2788/**
2789 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
2790 * @dev: sc device struct
2791 * @info: mr info
2792 * @scratch: u64 saved to be used during cqp completion
2793 * @post_sq: flag for cqp db to ring
2794 */
2795static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
2796 struct i40iw_sc_dev *dev,
2797 struct i40iw_reg_ns_stag_info *info,
2798 u64 scratch,
2799 bool post_sq)
2800{
2801 u64 *wqe;
2802 u64 temp;
2803 struct i40iw_sc_cqp *cqp;
2804 u64 header;
2805 u32 pble_obj_cnt;
2806 bool remote_access;
2807 u8 addr_type;
2808	enum i40iw_page_size page_size;
2809
2810	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2811	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2812 I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2813 remote_access = true;
2814 else
2815 remote_access = false;
2816
2817 pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2818
2819 if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
2820 return I40IW_ERR_INVALID_PBLE_INDEX;
2821
2822 cqp = dev->cqp;
2823 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2824 if (!wqe)
2825 return I40IW_ERR_RING_FULL;
2826
2827 temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2828 set_64bit_val(wqe, 0, temp);
2829
2830 set_64bit_val(wqe,
2831 8,
2832 LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
2833 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2834
2835 set_64bit_val(wqe,
2836 16,
2837 LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
2838 LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2839 if (!info->chunk_size) {
2840 set_64bit_val(wqe, 32, info->reg_addr_pa);
2841 set_64bit_val(wqe, 48, 0);
2842 } else {
2843 set_64bit_val(wqe, 32, 0);
2844 set_64bit_val(wqe, 48, info->first_pm_pbl_index);
2845 }
2846 set_64bit_val(wqe, 40, info->hmc_fcn_index);
2847 set_64bit_val(wqe, 56, 0);
2848
2849 addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2850 header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
2851 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2852 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2853		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2854		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2855 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2856 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2857 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2858 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2859 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2860
2861 i40iw_insert_wqe_hdr(wqe, header);
2862
2863 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
2864 wqe, I40IW_CQP_WQE_SIZE * 8);
2865
2866 if (post_sq)
2867 i40iw_sc_cqp_post_sq(cqp);
2868 return 0;
2869}
2870
2871/**
2872 * i40iw_sc_mr_reg_shared - register shared memory region
2873 * @dev: sc device struct
2874 * @info: info for shared memory registration
2875 * @scratch: u64 saved to be used during cqp completion
2876 * @post_sq: flag for cqp db to ring
2877 */
2878static enum i40iw_status_code i40iw_sc_mr_reg_shared(
2879 struct i40iw_sc_dev *dev,
2880 struct i40iw_register_shared_stag *info,
2881 u64 scratch,
2882 bool post_sq)
2883{
2884 u64 *wqe;
2885 struct i40iw_sc_cqp *cqp;
2886 u64 temp, va64, fbo, header;
2887 u32 va32;
2888 bool remote_access;
2889 u8 addr_type;
2890
2891 if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2892 I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2893 remote_access = true;
2894 else
2895 remote_access = false;
2896 cqp = dev->cqp;
2897 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2898 if (!wqe)
2899 return I40IW_ERR_RING_FULL;
2900 va64 = (uintptr_t)(info->va);
2901 va32 = (u32)(va64 & 0x00000000FFFFFFFF);
2902 fbo = (u64)(va32 & (4096 - 1));
2903
2904 set_64bit_val(wqe,
2905 0,
2906 (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
2907
2908 set_64bit_val(wqe,
2909 8,
2910 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2911 temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
2912 LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
2913 LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
2914 set_64bit_val(wqe, 16, temp);
2915
2916 addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2917 header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
2918 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2919 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2920 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2921 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2922 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2923
2924 i40iw_insert_wqe_hdr(wqe, header);
2925
2926 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
2927 wqe, I40IW_CQP_WQE_SIZE * 8);
2928
2929 if (post_sq)
2930 i40iw_sc_cqp_post_sq(cqp);
2931 return 0;
2932}
2933
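/*
 * FBO note: for zero-based (non-VA) shared registrations the first-byte
 * offset is taken from the low 12 bits of the parent VA
 * (va32 & (4096 - 1)), i.e. the offset into the first 4K page; the
 * upper bits of the VA are not used.
 */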
2934/**
2935 * i40iw_sc_dealloc_stag - deallocate stag
2936 * @dev: sc device struct
2937 * @info: dealloc stag info
2938 * @scratch: u64 saved to be used during cqp completion
2939 * @post_sq: flag for cqp db to ring
2940 */
2941static enum i40iw_status_code i40iw_sc_dealloc_stag(
2942 struct i40iw_sc_dev *dev,
2943 struct i40iw_dealloc_stag_info *info,
2944 u64 scratch,
2945 bool post_sq)
2946{
2947 u64 header;
2948 u64 *wqe;
2949 struct i40iw_sc_cqp *cqp;
2950
2951 cqp = dev->cqp;
2952 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2953 if (!wqe)
2954 return I40IW_ERR_RING_FULL;
2955 set_64bit_val(wqe,
2956 8,
2957 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2958 set_64bit_val(wqe,
2959 16,
2960 LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2961
2962 header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2963 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
2964 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2965
2966 i40iw_insert_wqe_hdr(wqe, header);
2967
2968 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
2969 wqe, I40IW_CQP_WQE_SIZE * 8);
2970
2971 if (post_sq)
2972 i40iw_sc_cqp_post_sq(cqp);
2973 return 0;
2974}
2975
2976/**
2977 * i40iw_sc_query_stag - query hardware for stag
2978 * @dev: sc device struct
2979 * @scratch: u64 saved to be used during cqp completion
2980 * @stag_index: stag index for query
2981 * @post_sq: flag for cqp db to ring
2982 */
2983static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
2984 u64 scratch,
2985 u32 stag_index,
2986 bool post_sq)
2987{
2988 u64 header;
2989 u64 *wqe;
2990 struct i40iw_sc_cqp *cqp;
2991
2992 cqp = dev->cqp;
2993 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2994 if (!wqe)
2995 return I40IW_ERR_RING_FULL;
2996 set_64bit_val(wqe,
2997 16,
2998 LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
2999
3000 header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
3001 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3002
3003 i40iw_insert_wqe_hdr(wqe, header);
3004
3005 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
3006 wqe, I40IW_CQP_WQE_SIZE * 8);
3007
3008 if (post_sq)
3009 i40iw_sc_cqp_post_sq(cqp);
3010 return 0;
3011}
3012
3013/**
3014 * i40iw_sc_mw_alloc - mw allocate
3015 * @dev: sc device struct
3016 * @scratch: u64 saved to be used during cqp completion
3017 * @mw_stag_index: stag index
3018 * @pd_id: pd id for this mw
3019 * @post_sq: flag for cqp db to ring
3020 */
3021static enum i40iw_status_code i40iw_sc_mw_alloc(
3022 struct i40iw_sc_dev *dev,
3023 u64 scratch,
3024 u32 mw_stag_index,
3025 u16 pd_id,
3026 bool post_sq)
3027{
3028 u64 header;
3029 struct i40iw_sc_cqp *cqp;
3030 u64 *wqe;
3031
3032 cqp = dev->cqp;
3033 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3034 if (!wqe)
3035 return I40IW_ERR_RING_FULL;
3036 set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
3037 set_64bit_val(wqe,
3038 16,
3039 LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
3040
3041 header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3042 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3043
3044 i40iw_insert_wqe_hdr(wqe, header);
3045
3046 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
3047 wqe, I40IW_CQP_WQE_SIZE * 8);
3048
3049 if (post_sq)
3050 i40iw_sc_cqp_post_sq(cqp);
3051 return 0;
3052}
3053
3054/**
3055 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
3056 * @qp: sc qp struct
3057 * @info: fast mr info
3058 * @post_sq: flag for cqp db to ring
3059 */
3060enum i40iw_status_code i40iw_sc_mr_fast_register(
3061 struct i40iw_sc_qp *qp,
3062 struct i40iw_fast_reg_stag_info *info,
3063 bool post_sq)
3064{
3065 u64 temp, header;
3066 u64 *wqe;
3067 u32 wqe_idx;
3068	enum i40iw_page_size page_size;
3069
3070	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
3071	wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
3072 0, info->wr_id);
3073 if (!wqe)
3074 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3075
3076 i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
3077 __func__, info->wr_id, wqe_idx,
3078 &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
3079 temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
3080 set_64bit_val(wqe, 0, temp);
3081
3082 temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
3083 set_64bit_val(wqe,
3084 8,
3085 LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
3086 LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
3087
3088 set_64bit_val(wqe,
3089 16,
3090 info->total_len |
3091 LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
3092
3093 header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
3094 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
3095 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
3096 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
3097		 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
3098		 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
3099 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
3100 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
3101 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
3102 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
3103 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3104
3105 i40iw_insert_wqe_hdr(wqe, header);
3106
3107 i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
3108 wqe, I40IW_QP_WQE_MIN_SIZE);
3109
3110 if (post_sq)
3111 i40iw_qp_post_wr(&qp->qp_uk);
3112 return 0;
3113}
3114
3115/**
3116 * i40iw_sc_send_lsmm - send last streaming mode message
3117 * @qp: sc qp struct
3118 * @lsmm_buf: buffer with lsmm message
3119 * @size: size of lsmm buffer
3120 * @stag: stag of lsmm buffer
3121 */
3122static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
3123 void *lsmm_buf,
3124 u32 size,
3125 i40iw_stag stag)
3126{
3127 u64 *wqe;
3128 u64 header;
3129 struct i40iw_qp_uk *qp_uk;
3130
3131 qp_uk = &qp->qp_uk;
3132 wqe = qp_uk->sq_base->elem;
3133
3134 set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3135
3136 set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3137
3138 set_64bit_val(wqe, 16, 0);
3139
3140 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3141 LS_64(1, I40IWQPSQ_STREAMMODE) |
3142 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3143 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3144
3145 i40iw_insert_wqe_hdr(wqe, header);
3146
3147 i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3148 wqe, I40IW_QP_WQE_MIN_SIZE);
3149}
3150
3151/**
3152 * i40iw_sc_send_lsmm_nostag - for privilege qp
3153 * @qp: sc qp struct
3154 * @lsmm_buf: buffer with lsmm message
3155 * @size: size of lsmm buffer
3156 */
3157static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3158 void *lsmm_buf,
3159 u32 size)
3160{
3161 u64 *wqe;
3162 u64 header;
3163 struct i40iw_qp_uk *qp_uk;
3164
3165 qp_uk = &qp->qp_uk;
3166 wqe = qp_uk->sq_base->elem;
3167
3168 set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3169
3170 set_64bit_val(wqe, 8, size);
3171
3172 set_64bit_val(wqe, 16, 0);
3173
3174 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3175 LS_64(1, I40IWQPSQ_STREAMMODE) |
3176 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3177 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3178
3179 i40iw_insert_wqe_hdr(wqe, header);
3180
3181 i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3182 wqe, I40IW_QP_WQE_MIN_SIZE);
3183}
3184
3185/**
3186 * i40iw_sc_send_rtt - send last read0 or write0
3187 * @qp: sc qp struct
3188 * @read: Do read0 or write0
3189 */
3190static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3191{
3192 u64 *wqe;
3193 u64 header;
3194 struct i40iw_qp_uk *qp_uk;
3195
3196 qp_uk = &qp->qp_uk;
3197 wqe = qp_uk->sq_base->elem;
3198
3199 set_64bit_val(wqe, 0, 0);
3200 set_64bit_val(wqe, 8, 0);
3201 set_64bit_val(wqe, 16, 0);
3202 if (read) {
3203 header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3204 LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3205 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3206 set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3207 } else {
3208 header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3209 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3210 }
3211
3212 i40iw_insert_wqe_hdr(wqe, header);
3213
3214 i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3215 wqe, I40IW_QP_WQE_MIN_SIZE);
3216}
3217
3218/**
3219 * i40iw_sc_post_wqe0 - send wqe with opcode
3220 * @qp: sc qp struct
3221 * @opcode: opcode to use for wqe0
3222 */
3223static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3224{
3225 u64 *wqe;
3226 u64 header;
3227 struct i40iw_qp_uk *qp_uk;
3228
3229 qp_uk = &qp->qp_uk;
3230 wqe = qp_uk->sq_base->elem;
3231
3232 if (!wqe)
3233 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3234 switch (opcode) {
3235 case I40IWQP_OP_NOP:
3236 set_64bit_val(wqe, 0, 0);
3237 set_64bit_val(wqe, 8, 0);
3238 set_64bit_val(wqe, 16, 0);
3239 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3240 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3241
3242 i40iw_insert_wqe_hdr(wqe, header);
3243 break;
3244 case I40IWQP_OP_RDMA_SEND:
3245 set_64bit_val(wqe, 0, 0);
3246 set_64bit_val(wqe, 8, 0);
3247 set_64bit_val(wqe, 16, 0);
3248 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3249 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3250 LS_64(1, I40IWQPSQ_STREAMMODE) |
3251 LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3252
3253 i40iw_insert_wqe_hdr(wqe, header);
3254 break;
3255 default:
3256 i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3257 __func__);
3258 break;
3259 }
3260 return 0;
3261}
3262
3263/**
3264 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3265 * @dev: ptr to i40iw_dev struct
3266 * @hmc_fn_id: hmc function id
3267 */
3268enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
3269{
3270 struct i40iw_hmc_info *hmc_info;
3271 struct i40iw_dma_mem query_fpm_mem;
3272 struct i40iw_virt_mem virt_mem;
3273 struct i40iw_vfdev *vf_dev = NULL;
3274 u32 mem_size;
3275 enum i40iw_status_code ret_code = 0;
3276 bool poll_registers = true;
3277 u16 iw_vf_idx;
3278 u8 wait_type;
3279
3280 if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3281 (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3282 return I40IW_ERR_INVALID_HMCFN_ID;
3283
3284 i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
3285 dev->hmc_fn_id);
3286 if (hmc_fn_id == dev->hmc_fn_id) {
3287 hmc_info = dev->hmc_info;
3288 query_fpm_mem.pa = dev->fpm_query_buf_pa;
3289 query_fpm_mem.va = dev->fpm_query_buf;
3290 } else {
3291 vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
3292 if (!vf_dev)
3293 return I40IW_ERR_INVALID_VF_ID;
3294
3295 hmc_info = &vf_dev->hmc_info;
3296 iw_vf_idx = vf_dev->iw_vf_idx;
3297 i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
3298 hmc_info, hmc_info->hmc_obj);
3299 if (!vf_dev->fpm_query_buf) {
3300 if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
3301 ret_code = i40iw_alloc_query_fpm_buf(dev,
3302 &dev->vf_fpm_query_buf[iw_vf_idx]);
3303 if (ret_code)
3304 return ret_code;
3305 }
3306 vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
3307 vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
3308 }
3309 query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
3310 query_fpm_mem.va = vf_dev->fpm_query_buf;
3311		/*
3312		 * Hardware specific: this call is issued by the PF on
3313		 * behalf of the VF, and i40iw_sc_query_fpm_values must
3314		 * poll the ccq because the PF ccq is already created.
3315		 */
3317 poll_registers = false;
3318 }
3319
3320 hmc_info->hmc_fn_id = hmc_fn_id;
3321
3322 if (hmc_fn_id != dev->hmc_fn_id) {
3323 ret_code =
3324 i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3325 } else {
3326 wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3327 (u8)I40IW_CQP_WAIT_POLL_CQ;
3328
3329 ret_code = i40iw_sc_query_fpm_values(
3330 dev->cqp,
3331 0,
3332 hmc_info->hmc_fn_id,
3333 &query_fpm_mem,
3334 true,
3335 wait_type);
3336 }
3337 if (ret_code)
3338 return ret_code;
3339
3340 /* parse the fpm_query_buf and fill hmc obj info */
3341 ret_code =
3342 i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
3343 hmc_info,
3344 &dev->hmc_fpm_misc);
3345 if (ret_code)
3346 return ret_code;
3347 i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
3348 query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
3349
3350 if (hmc_fn_id != dev->hmc_fn_id) {
3351 i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3352
3353 /* parse the fpm_commit_buf and fill hmc obj info */
3354		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
3355		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3356 (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
3357 ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3358 if (ret_code)
3359 return ret_code;
3360 hmc_info->sd_table.sd_entry = virt_mem.va;
3361 }
3362
3363 /* fill size of objects which are fixed */
3364 hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
3365 hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
3366 hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
3367 hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
3368 hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
3369
3370 return ret_code;
3371}
3372
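/*
 * Flow summary: for the local HMC function the query-FPM buffer comes
 * from @dev and the command is issued directly, waiting by register
 * poll or ccq poll as appropriate; for a VF function the PF allocates a
 * per-VF query buffer on first use and issues the command on the VF's
 * behalf through i40iw_cqp_query_fpm_values_cmd().
 */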
3373/**
3374 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3375 * populates fpm base address in hmc_info
3376 * @dev: ptr to i40iw_dev struct
3377 * @hmc_fn_id: hmc function id
3378 */
3379static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
3380 u8 hmc_fn_id)
3381{
3382 struct i40iw_hmc_info *hmc_info;
3383 struct i40iw_hmc_obj_info *obj_info;
3384 u64 *buf;
3385 struct i40iw_dma_mem commit_fpm_mem;
3386 u32 i, j;
3387 enum i40iw_status_code ret_code = 0;
3388 bool poll_registers = true;
3389 u8 wait_type;
3390
3391 if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3392 (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3393 return I40IW_ERR_INVALID_HMCFN_ID;
3394
3395 if (hmc_fn_id == dev->hmc_fn_id) {
3396 hmc_info = dev->hmc_info;
3397 } else {
3398 hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
3399 poll_registers = false;
3400 }
3401 if (!hmc_info)
3402 return I40IW_ERR_BAD_PTR;
3403
3404 obj_info = hmc_info->hmc_obj;
3405 buf = dev->fpm_commit_buf;
3406
3407 /* copy cnt values in commit buf */
3408 for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
3409 i++, j += 8)
3410 set_64bit_val(buf, j, (u64)obj_info[i].cnt);
3411
3412 set_64bit_val(buf, 40, 0); /* APBVT rsvd */
3413
3414 commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
3415 commit_fpm_mem.va = dev->fpm_commit_buf;
3416 wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3417 (u8)I40IW_CQP_WAIT_POLL_CQ;
3418 ret_code = i40iw_sc_commit_fpm_values(
3419 dev->cqp,
3420 0,
3421 hmc_info->hmc_fn_id,
3422 &commit_fpm_mem,
3423 true,
3424 wait_type);
3425
3426 /* parse the fpm_commit_buf and fill hmc obj info */
3427 if (!ret_code)
3428		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
3429 hmc_info->hmc_obj,
3430 &hmc_info->sd_table.sd_cnt);
3431
3432 i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
3433 commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
3434
3435 return ret_code;
3436}
3437
3438/**
3439 * cqp_sds_wqe_fill - fill cqp wqe for sd
3440 * @cqp: struct for cqp hw
3441 * @info: sd info for wqe
3442 * @scratch: u64 saved to be used during cqp completion
3443 */
3444static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
3445 struct i40iw_update_sds_info *info,
3446 u64 scratch)
3447{
3448 u64 data;
3449 u64 header;
3450 u64 *wqe;
3451 int mem_entries, wqe_entries;
3452 struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
3453
3454 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3455 if (!wqe)
3456 return I40IW_ERR_RING_FULL;
3457
3458 I40IW_CQP_INIT_WQE(wqe);
3459 wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
3460 mem_entries = info->cnt - wqe_entries;
3461
3462 header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
3463 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
3464 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
3465
3466 if (mem_entries) {
3467 memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
3468 data = sdbuf->pa;
3469 } else {
3470 data = 0;
3471 }
3472 data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
3473
3474 set_64bit_val(wqe, 16, data);
3475
3476 switch (wqe_entries) {
3477 case 3:
3478 set_64bit_val(wqe, 48,
3479 (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3480 LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3481
3482 set_64bit_val(wqe, 56, info->entry[2].data);
3483 /* fallthrough */
3484 case 2:
3485 set_64bit_val(wqe, 32,
3486 (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3487 LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3488
3489 set_64bit_val(wqe, 40, info->entry[1].data);
3490 /* fallthrough */
3491 case 1:
3492 set_64bit_val(wqe, 0,
3493 LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
3494
3495 set_64bit_val(wqe, 8, info->entry[0].data);
3496 break;
3497 default:
3498 break;
3499 }
3500
3501 i40iw_insert_wqe_hdr(wqe, header);
3502
3503 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
3504 wqe, I40IW_CQP_WQE_SIZE * 8);
3505 return 0;
3506}
3507
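/*
 * Up to three SD entries travel inline in the WQE (slots filled by the
 * intentionally falling-through switch above); any remaining entries
 * are copied, 16 bytes each, into the preallocated cqp->sdbuf DMA area
 * whose physical address and entry count are passed in the WQE instead.
 */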
3508/**
3509 * i40iw_update_pe_sds - cqp wqe for sd
3510 * @dev: ptr to i40iw_dev struct
3511 * @info: sd info for sd's
3512 * @scratch: u64 saved to be used during cqp completion
3513 */
3514static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3515 struct i40iw_update_sds_info *info,
3516 u64 scratch)
3517{
3518 struct i40iw_sc_cqp *cqp = dev->cqp;
3519 enum i40iw_status_code ret_code;
3520
3521 ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3522 if (!ret_code)
3523 i40iw_sc_cqp_post_sq(cqp);
3524
3525 return ret_code;
3526}
3527
3528/**
3529 * i40iw_update_sds_noccq - update sd before ccq created
3530 * @dev: sc device struct
3531 * @info: sd info for sd's
3532 */
3533enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3534 struct i40iw_update_sds_info *info)
3535{
3536 u32 error, val, tail;
3537 struct i40iw_sc_cqp *cqp = dev->cqp;
3538 enum i40iw_status_code ret_code;
3539
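	/* no CCQ exists yet at this point, so completion must be detected
	 * by polling the CQP tail registers rather than waiting for a CQE
	 */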
3540 ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3541 if (ret_code)
3542 return ret_code;
3543 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3544 if (error)
3545 return I40IW_ERR_CQP_COMPL_ERROR;
3546
3547 i40iw_sc_cqp_post_sq(cqp);
3548 ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3549
3550 return ret_code;
3551}
3552
3553/**
3554 * i40iw_sc_suspend_qp - suspend qp for param change
3555 * @cqp: struct for cqp hw
3556 * @qp: sc qp struct
3557 * @scratch: u64 saved to be used during cqp completion
3558 */
3559enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3560 struct i40iw_sc_qp *qp,
3561 u64 scratch)
3562{
3563 u64 header;
3564 u64 *wqe;
3565
3566 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3567 if (!wqe)
3568 return I40IW_ERR_RING_FULL;
3569 header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3570 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3571 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3572
3573 i40iw_insert_wqe_hdr(wqe, header);
3574
3575 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3576 wqe, I40IW_CQP_WQE_SIZE * 8);
3577
3578 i40iw_sc_cqp_post_sq(cqp);
3579 return 0;
3580}
3581
3582/**
3583 * i40iw_sc_resume_qp - resume qp after suspend
3584 * @cqp: struct for cqp hw
3585 * @qp: sc qp struct
3586 * @scratch: u64 saved to be used during cqp completion
3587 */
3588enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3589 struct i40iw_sc_qp *qp,
3590 u64 scratch)
3591{
3592 u64 header;
3593 u64 *wqe;
3594
3595 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3596 if (!wqe)
3597 return I40IW_ERR_RING_FULL;
3598 set_64bit_val(wqe,
3599 16,
3600 LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3601
3602 header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3603 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3604 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3605
3606 i40iw_insert_wqe_hdr(wqe, header);
3607
3608 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3609 wqe, I40IW_CQP_WQE_SIZE * 8);
3610
3611 i40iw_sc_cqp_post_sq(cqp);
3612 return 0;
3613}
3614
3615/**
3616 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3617 * @cqp: struct for cqp hw
3618 * @scratch: u64 saved to be used during cqp completion
3619 * @hmc_fn_id: hmc function id
3620 * @post_sq: flag for cqp db to ring
3621 * @poll_registers: flag to poll register for cqp completion
3622 */
3623enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3624 struct i40iw_sc_cqp *cqp,
3625 u64 scratch,
3626 u8 hmc_fn_id,
3627 bool post_sq,
3628 bool poll_registers)
3629{
3630 u64 header;
3631 u64 *wqe;
3632 u32 tail, val, error;
3633 enum i40iw_status_code ret_code = 0;
3634
3635 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3636 if (!wqe)
3637 return I40IW_ERR_RING_FULL;
3638 set_64bit_val(wqe,
3639 16,
3640 LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3641
3642 header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3643 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3644
3645 i40iw_insert_wqe_hdr(wqe, header);
3646
3647 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3648 wqe, I40IW_CQP_WQE_SIZE * 8);
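	/* capture the current tail (and any latched error) before posting
	 * so register polling can tell when this WQE has been consumed
	 */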
3649 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3650 if (error) {
3651 ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3652 return ret_code;
3653 }
3654 if (post_sq) {
3655 i40iw_sc_cqp_post_sq(cqp);
3656 if (poll_registers)
3657 /* check for cqp sq tail update */
3658 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3659 else
3660 ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3661 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3662 NULL);
3663 }
3664
3665 return ret_code;
3666}
3667
3668/**
3669 * i40iw_ring_full - check if cqp ring is full
3670 * @cqp: struct for cqp hw
3671 */
3672static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3673{
3674 return I40IW_RING_FULL_ERR(cqp->sq_ring);
3675}
3676
3677/**
3678 * i40iw_est_sd - returns approximate number of SDs for HMC
3679 * @dev: sc device struct
3680 * @hmc_info: hmc structure, size and count for HMC objects
3681 */
3682static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3683{
3684 int i;
3685 u64 size = 0;
3686 u64 sd;
3687
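	/*
	 * Sum the footprint of every HMC object below PBLE, then convert
	 * to segment descriptors: each SD maps 2MB (1 << 21), rounding up
	 * when the total is not 2MB-aligned.
	 */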
3688 for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3689 size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3690
3691 if (dev->is_pf)
3692 size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3693
3694 if (size & 0x1FFFFF)
3695 sd = (size >> 21) + 1; /* add 1 for remainder */
3696 else
3697 sd = size >> 21;
3698
3699 if (!dev->is_pf) {
3700 /* 2MB alignment for VF PBLE HMC */
3701 size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3702 if (size & 0x1FFFFF)
3703 sd += (size >> 21) + 1; /* add 1 for remainder */
3704 else
3705 sd += size >> 21;
3706 }
3707
3708 return sd;
3709}
3710
3711/**
3712 * i40iw_config_fpm_values - configure HMC objects
3713 * @dev: sc device struct
3714 * @qp_count: desired qp count
3715 */
3716enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
3717{
3718 struct i40iw_virt_mem virt_mem;
3719 u32 i, mem_size;
3720 u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
3721 u32 powerof2;
3722	u64 sd_needed;
3723	u32 loop_count = 0;
3724
3725 struct i40iw_hmc_info *hmc_info;
3726 struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
3727 enum i40iw_status_code ret_code = 0;
3728
3729 hmc_info = dev->hmc_info;
3730 hmc_fpm_misc = &dev->hmc_fpm_misc;
3731
3732 ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
3733 if (ret_code) {
3734 i40iw_debug(dev, I40IW_DEBUG_HMC,
3735 "i40iw_sc_init_iw_hmc returned error_code = %d\n",
3736 ret_code);
3737 return ret_code;
3738 }
3739
3740	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
3741		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
3742	sd_needed = i40iw_est_sd(dev, hmc_info);
3743	i40iw_debug(dev, I40IW_DEBUG_HMC,
3744 "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
3745 __func__, sd_needed, hmc_info->first_sd_index);
3746 i40iw_debug(dev, I40IW_DEBUG_HMC,
3747		    "%s: sd count %d where max sd is %d\n",
3748 __func__, hmc_info->sd_table.sd_cnt,
3749		    hmc_fpm_misc->max_sds);
3750
3751 qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
3752 qpwantedoriginal = qpwanted;
3753 mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
3754 pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
3755
3756 i40iw_debug(dev, I40IW_DEBUG_HMC,
3757 "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
3758 qp_count, hmc_fpm_misc->max_sds,
3759 hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
3760 hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
3761 hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
3762 hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
3763
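	/*
	 * Fit loop: scale back the wanted QP, MR and PBLE counts until the
	 * estimated SD requirement drops within the max_sds the firmware
	 * advertises, or the loop limit is reached.
	 */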
3764 do {
3765 ++loop_count;
3766 hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
3767 hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
3768 min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
3769 hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
3770 hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
3771 qpwanted * hmc_fpm_misc->ht_multiplier;
3772 hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
3773 hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
3774 hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
3775 hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
3776
3777 hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
3778 hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
3779 hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
3780 hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
3781 hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
3782 hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
3783 hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
3784 ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
3785 hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
3786 hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
3787 hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
3788
3789 /* How much memory is needed for all the objects. */
3790		sd_needed = i40iw_est_sd(dev, hmc_info);
3791		if ((loop_count > 1000) ||
3792 ((!(loop_count % 10)) &&
3793 (qpwanted > qpwantedoriginal * 2 / 3))) {
3794 if (qpwanted > FPM_MULTIPLIER) {
3795 qpwanted -= FPM_MULTIPLIER;
3796 powerof2 = 1;
3797 while (powerof2 < qpwanted)
3798 powerof2 *= 2;
3799 powerof2 /= 2;
3800 qpwanted = powerof2;
3801 } else {
3802 qpwanted /= 2;
3803 }
3804 }
3805 if (mrwanted > FPM_MULTIPLIER * 10)
3806 mrwanted -= FPM_MULTIPLIER * 10;
3807 if (pblewanted > FPM_MULTIPLIER * 1000)
3808 pblewanted -= FPM_MULTIPLIER * 1000;
3809 } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
3810
3811	sd_needed = i40iw_est_sd(dev, hmc_info);
3812
3813 i40iw_debug(dev, I40IW_DEBUG_HMC,
3814 "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
3815 loop_count, sd_needed,
3816 hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
3817 hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
3818 hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
3819 hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
3820
3821 ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
3822 if (ret_code) {
3823 i40iw_debug(dev, I40IW_DEBUG_HMC,
3824 "configure_iw_fpm returned error_code[x%08X]\n",
3825 i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
3826 return ret_code;
3827 }
3828
3829	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3830 (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
3831 ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3832 if (ret_code) {
3833 i40iw_debug(dev, I40IW_DEBUG_HMC,
3834 "%s: failed to allocate memory for sd_entry buffer\n",
3835 __func__);
3836 return ret_code;
3837 }
3838 hmc_info->sd_table.sd_entry = virt_mem.va;
3839
3840 return ret_code;
3841}
3842
3843/**
3844 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
3845 * @dev: rdma device
3846 * @pcmdinfo: cqp command info
3847 */
3848static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
3849 struct cqp_commands_info *pcmdinfo)
3850{
3851 enum i40iw_status_code status;
3852 struct i40iw_dma_mem values_mem;
3853
3854 dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
3855 switch (pcmdinfo->cqp_cmd) {
3856 case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
3857 status = i40iw_sc_del_local_mac_ipaddr_entry(
3858 pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
3859 pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
3860 pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
3861 pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
3862 pcmdinfo->post_sq);
3863 break;
3864 case OP_CEQ_DESTROY:
3865 status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
3866 pcmdinfo->in.u.ceq_destroy.scratch,
3867 pcmdinfo->post_sq);
3868 break;
3869 case OP_AEQ_DESTROY:
3870 status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
3871 pcmdinfo->in.u.aeq_destroy.scratch,
3872 pcmdinfo->post_sq);
3873
3874 break;
3875 case OP_DELETE_ARP_CACHE_ENTRY:
3876 status = i40iw_sc_del_arp_cache_entry(
3877 pcmdinfo->in.u.del_arp_cache_entry.cqp,
3878 pcmdinfo->in.u.del_arp_cache_entry.scratch,
3879 pcmdinfo->in.u.del_arp_cache_entry.arp_index,
3880 pcmdinfo->post_sq);
3881 break;
3882 case OP_MANAGE_APBVT_ENTRY:
3883 status = i40iw_sc_manage_apbvt_entry(
3884 pcmdinfo->in.u.manage_apbvt_entry.cqp,
3885 &pcmdinfo->in.u.manage_apbvt_entry.info,
3886 pcmdinfo->in.u.manage_apbvt_entry.scratch,
3887 pcmdinfo->post_sq);
3888 break;
3889 case OP_CEQ_CREATE:
3890 status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
3891 pcmdinfo->in.u.ceq_create.scratch,
3892 pcmdinfo->post_sq);
3893 break;
3894 case OP_AEQ_CREATE:
3895 status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
3896 pcmdinfo->in.u.aeq_create.scratch,
3897 pcmdinfo->post_sq);
3898 break;
3899 case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
3900 status = i40iw_sc_alloc_local_mac_ipaddr_entry(
3901 pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
3902 pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
3903 pcmdinfo->post_sq);
3904 break;
3905 case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
3906 status = i40iw_sc_add_local_mac_ipaddr_entry(
3907 pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
3908 &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
3909 pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
3910 pcmdinfo->post_sq);
3911 break;
3912 case OP_MANAGE_QHASH_TABLE_ENTRY:
3913 status = i40iw_sc_manage_qhash_table_entry(
3914 pcmdinfo->in.u.manage_qhash_table_entry.cqp,
3915 &pcmdinfo->in.u.manage_qhash_table_entry.info,
3916 pcmdinfo->in.u.manage_qhash_table_entry.scratch,
3917 pcmdinfo->post_sq);
3918
3919 break;
3920 case OP_QP_MODIFY:
3921 status = i40iw_sc_qp_modify(
3922 pcmdinfo->in.u.qp_modify.qp,
3923 &pcmdinfo->in.u.qp_modify.info,
3924 pcmdinfo->in.u.qp_modify.scratch,
3925 pcmdinfo->post_sq);
3926
3927 break;
3928 case OP_QP_UPLOAD_CONTEXT:
3929 status = i40iw_sc_qp_upload_context(
3930 pcmdinfo->in.u.qp_upload_context.dev,
3931 &pcmdinfo->in.u.qp_upload_context.info,
3932 pcmdinfo->in.u.qp_upload_context.scratch,
3933 pcmdinfo->post_sq);
3934
3935 break;
3936 case OP_CQ_CREATE:
3937 status = i40iw_sc_cq_create(
3938 pcmdinfo->in.u.cq_create.cq,
3939 pcmdinfo->in.u.cq_create.scratch,
3940 pcmdinfo->in.u.cq_create.check_overflow,
3941 pcmdinfo->post_sq);
3942 break;
3943 case OP_CQ_DESTROY:
3944 status = i40iw_sc_cq_destroy(
3945 pcmdinfo->in.u.cq_destroy.cq,
3946 pcmdinfo->in.u.cq_destroy.scratch,
3947 pcmdinfo->post_sq);
3948
3949 break;
3950 case OP_QP_CREATE:
3951 status = i40iw_sc_qp_create(
3952 pcmdinfo->in.u.qp_create.qp,
3953 &pcmdinfo->in.u.qp_create.info,
3954 pcmdinfo->in.u.qp_create.scratch,
3955 pcmdinfo->post_sq);
3956 break;
3957 case OP_QP_DESTROY:
3958 status = i40iw_sc_qp_destroy(
3959 pcmdinfo->in.u.qp_destroy.qp,
3960 pcmdinfo->in.u.qp_destroy.scratch,
3961 pcmdinfo->in.u.qp_destroy.remove_hash_idx,
3962				pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
3964 pcmdinfo->post_sq);
3965
3966 break;
3967 case OP_ALLOC_STAG:
3968 status = i40iw_sc_alloc_stag(
3969 pcmdinfo->in.u.alloc_stag.dev,
3970 &pcmdinfo->in.u.alloc_stag.info,
3971 pcmdinfo->in.u.alloc_stag.scratch,
3972 pcmdinfo->post_sq);
3973 break;
3974 case OP_MR_REG_NON_SHARED:
3975 status = i40iw_sc_mr_reg_non_shared(
3976 pcmdinfo->in.u.mr_reg_non_shared.dev,
3977 &pcmdinfo->in.u.mr_reg_non_shared.info,
3978 pcmdinfo->in.u.mr_reg_non_shared.scratch,
3979 pcmdinfo->post_sq);
3980
3981 break;
3982 case OP_DEALLOC_STAG:
3983 status = i40iw_sc_dealloc_stag(
3984 pcmdinfo->in.u.dealloc_stag.dev,
3985 &pcmdinfo->in.u.dealloc_stag.info,
3986 pcmdinfo->in.u.dealloc_stag.scratch,
3987 pcmdinfo->post_sq);
3988
3989 break;
3990 case OP_MW_ALLOC:
3991 status = i40iw_sc_mw_alloc(
3992 pcmdinfo->in.u.mw_alloc.dev,
3993 pcmdinfo->in.u.mw_alloc.scratch,
3994 pcmdinfo->in.u.mw_alloc.mw_stag_index,
3995 pcmdinfo->in.u.mw_alloc.pd_id,
3996 pcmdinfo->post_sq);
3997
3998 break;
3999 case OP_QP_FLUSH_WQES:
4000 status = i40iw_sc_qp_flush_wqes(
4001 pcmdinfo->in.u.qp_flush_wqes.qp,
4002 &pcmdinfo->in.u.qp_flush_wqes.info,
4003				pcmdinfo->in.u.qp_flush_wqes.scratch,
4004				pcmdinfo->post_sq);
4005 break;
4006 case OP_ADD_ARP_CACHE_ENTRY:
4007 status = i40iw_sc_add_arp_cache_entry(
4008 pcmdinfo->in.u.add_arp_cache_entry.cqp,
4009 &pcmdinfo->in.u.add_arp_cache_entry.info,
4010 pcmdinfo->in.u.add_arp_cache_entry.scratch,
4011 pcmdinfo->post_sq);
4012 break;
4013 case OP_MANAGE_PUSH_PAGE:
4014 status = i40iw_sc_manage_push_page(
4015 pcmdinfo->in.u.manage_push_page.cqp,
4016 &pcmdinfo->in.u.manage_push_page.info,
4017 pcmdinfo->in.u.manage_push_page.scratch,
4018 pcmdinfo->post_sq);
4019 break;
4020 case OP_UPDATE_PE_SDS:
4021 /* case I40IW_CQP_OP_UPDATE_PE_SDS */
4022 status = i40iw_update_pe_sds(
4023 pcmdinfo->in.u.update_pe_sds.dev,
4024 &pcmdinfo->in.u.update_pe_sds.info,
4025				pcmdinfo->in.u.update_pe_sds.scratch);
4027
4028 break;
4029 case OP_MANAGE_HMC_PM_FUNC_TABLE:
4030 status = i40iw_sc_manage_hmc_pm_func_table(
4031 pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
4032 pcmdinfo->in.u.manage_hmc_pm.scratch,
4033 (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
4034 pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
4035 true);
4036 break;
4037 case OP_SUSPEND:
4038 status = i40iw_sc_suspend_qp(
4039 pcmdinfo->in.u.suspend_resume.cqp,
4040 pcmdinfo->in.u.suspend_resume.qp,
4041 pcmdinfo->in.u.suspend_resume.scratch);
4042 break;
4043 case OP_RESUME:
4044 status = i40iw_sc_resume_qp(
4045 pcmdinfo->in.u.suspend_resume.cqp,
4046 pcmdinfo->in.u.suspend_resume.qp,
4047 pcmdinfo->in.u.suspend_resume.scratch);
4048 break;
4049 case OP_MANAGE_VF_PBLE_BP:
4050 status = i40iw_manage_vf_pble_bp(
4051 pcmdinfo->in.u.manage_vf_pble_bp.cqp,
4052 &pcmdinfo->in.u.manage_vf_pble_bp.info,
4053 pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
4054 break;
4055 case OP_QUERY_FPM_VALUES:
4056 values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
4057 values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
4058 status = i40iw_sc_query_fpm_values(
4059 pcmdinfo->in.u.query_fpm_values.cqp,
4060 pcmdinfo->in.u.query_fpm_values.scratch,
4061 pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
4062 &values_mem, true, I40IW_CQP_WAIT_EVENT);
4063 break;
4064 case OP_COMMIT_FPM_VALUES:
4065 values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
4066 values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
4067 status = i40iw_sc_commit_fpm_values(
4068 pcmdinfo->in.u.commit_fpm_values.cqp,
4069 pcmdinfo->in.u.commit_fpm_values.scratch,
4070 pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
4071 &values_mem,
4072 true,
4073 I40IW_CQP_WAIT_EVENT);
4074 break;
4075 default:
4076 status = I40IW_NOT_SUPPORTED;
4077 break;
4078 }
4079
4080 return status;
4081}
4082
4083/**
4084 * i40iw_process_cqp_cmd - process or enqueue a cqp command
4085 * @dev: sc device struct
4086 * @pcmdinfo: cqp command info
4087 */
4088enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
4089 struct cqp_commands_info *pcmdinfo)
4090{
4091 enum i40iw_status_code status = 0;
4092	unsigned long flags;
4093
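	/* execute immediately only when nothing is backlogged and the CQP
	 * SQ has room; otherwise queue the command for i40iw_process_bh
	 */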
4094 spin_lock_irqsave(&dev->cqp_lock, flags);
4095 if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4096 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4097 else
4098 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
4099 spin_unlock_irqrestore(&dev->cqp_lock, flags);
4100 return status;
4101}
4102
4103/**
4104 * i40iw_process_bh - called from tasklet for cqp list
4105 * @dev: sc device struct
4106 */
4107enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
4108{
4109 enum i40iw_status_code status = 0;
4110 struct cqp_commands_info *pcmdinfo;
4111	unsigned long flags;
4112
4113 spin_lock_irqsave(&dev->cqp_lock, flags);
4114 while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
4115 pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
4116
4117 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4118 if (status)
4119 break;
4120 }
4121 spin_unlock_irqrestore(&dev->cqp_lock, flags);
4122 return status;
4123}
4124
4125/**
4126 * i40iw_iwarp_opcode - extract the iwarp opcode from an incoming packet
4127 * @info: aeq info for the packet
4128 * @pkt: packet for error
4129 */
4130static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4131{
4132	__be16 *mpa;
4133	u32 opcode = 0xffffffff;
4134
4135 if (info->q2_data_written) {
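		/* bytes 2-3 of the located MPA frame hold the DDP and RDMAP
		 * control fields; the low nibble of the RDMAP control byte
		 * is the opcode
		 */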
4136		mpa = (__be16 *)pkt;
4137		opcode = ntohs(mpa[1]) & 0xf;
4138 }
4139 return opcode;
4140}
4141
4142/**
4143 * i40iw_locate_mpa - return pointer to mpa in the pkt
4144 * @pkt: packet with data
4145 */
4146static u8 *i40iw_locate_mpa(u8 *pkt)
4147{
4148 /* skip over ethernet header */
4149 pkt += I40IW_MAC_HLEN;
4150
4151 /* Skip over IP and TCP headers */
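	/* the low nibble of the first IP byte is the IHL in 32-bit words;
	 * the high nibble of TCP byte 12 is the data offset, also in words
	 */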
4152 pkt += 4 * (pkt[0] & 0x0f);
4153 pkt += 4 * ((pkt[12] >> 4) & 0x0f);
4154 return pkt;
4155}
4156
4157/**
4158 * i40iw_setup_termhdr - termhdr for terminate pkt
4159 * @qp: sc qp ptr for pkt
4160 * @hdr: term hdr
4161 * @opcode: flush opcode for termhdr
4162 * @layer_etype: error layer + error type
4163 * @err: error code in the header
4164 */
4165static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4166 struct i40iw_terminate_hdr *hdr,
4167 enum i40iw_flush_opcode opcode,
4168 u8 layer_etype,
4169 u8 err)
4170{
4171 qp->flush_code = opcode;
4172 hdr->layer_etype = layer_etype;
4173 hdr->error_code = err;
4174}
4175
4176/**
4177 * i40iw_bld_terminate_hdr - build terminate message header
4178 * @qp: qp associated with received terminate AE
4179 * @info: the struct containing AE information
4180 */
4181static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
4182 struct i40iw_aeqe_info *info)
4183{
4184 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4185 u16 ddp_seg_len;
4186 int copy_len = 0;
4187 u8 is_tagged = 0;
4188	u32 opcode;
4189 struct i40iw_terminate_hdr *termhdr;
4190
4191 termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
4192 memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
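	/*
	 * The terminate header is built at the start of the Q2 buffer; any
	 * DDP/RDMA headers copied from the offending packet are appended
	 * directly behind it (termhdr + 1) before the message is sent.
	 */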
4193
4194 if (info->q2_data_written) {
4195 /* Use data from offending packet to fill in ddp & rdma hdrs */
4196 pkt = i40iw_locate_mpa(pkt);
4197		ddp_seg_len = ntohs(*(__be16 *)pkt);
4198		if (ddp_seg_len) {
4199 copy_len = 2;
4200 termhdr->hdrct = DDP_LEN_FLAG;
4201 if (pkt[2] & 0x80) {
4202 is_tagged = 1;
4203 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
4204 copy_len += TERM_DDP_LEN_TAGGED;
4205 termhdr->hdrct |= DDP_HDR_FLAG;
4206 }
4207 } else {
4208 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
4209 copy_len += TERM_DDP_LEN_UNTAGGED;
4210 termhdr->hdrct |= DDP_HDR_FLAG;
4211 }
4212
4213 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
4214 if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
4215 copy_len += TERM_RDMA_LEN;
4216 termhdr->hdrct |= RDMA_HDR_FLAG;
4217 }
4218 }
4219 }
4220 }
4221 }
4222
4223 opcode = i40iw_iwarp_opcode(info, pkt);
4224
4225 switch (info->ae_id) {
4226 case I40IW_AE_AMP_UNALLOCATED_STAG:
4227 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4228 if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
4229 i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4230 (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
4231 else
4232 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4233 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4234 break;
4235 case I40IW_AE_AMP_BOUNDS_VIOLATION:
4236 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4237 if (info->q2_data_written)
4238 i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4239 (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
4240 else
4241 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4242 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
4243 break;
4244 case I40IW_AE_AMP_BAD_PD:
4245 switch (opcode) {
4246 case I40IW_OP_TYPE_RDMA_WRITE:
4247 i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4248 (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
4249 break;
4250 case I40IW_OP_TYPE_SEND_INV:
4251 case I40IW_OP_TYPE_SEND_SOL_INV:
4252 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4253 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
4254 break;
4255 default:
4256 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4257 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
4258 }
4259 break;
4260 case I40IW_AE_AMP_INVALID_STAG:
4261 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4262 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4263 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4264 break;
4265 case I40IW_AE_AMP_BAD_QP:
4266 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4267 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4268 break;
4269 case I40IW_AE_AMP_BAD_STAG_KEY:
4270 case I40IW_AE_AMP_BAD_STAG_INDEX:
4271 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4272 switch (opcode) {
4273 case I40IW_OP_TYPE_SEND_INV:
4274 case I40IW_OP_TYPE_SEND_SOL_INV:
4275 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4276 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
4277 break;
4278 default:
4279 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4280 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
4281 }
4282 break;
4283 case I40IW_AE_AMP_RIGHTS_VIOLATION:
4284 case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
4285 case I40IW_AE_PRIV_OPERATION_DENIED:
4286 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4287 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4288 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
4289 break;
4290 case I40IW_AE_AMP_TO_WRAP:
4291 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4292 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4293 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
4294 break;
4295 case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
4296 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4297 (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
4298 break;
4299 case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4300 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4301 (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
4302 break;
4303 case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
4304 case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
4305 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4306 (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4307 break;
4308 case I40IW_AE_LCE_QP_CATASTROPHIC:
4309 case I40IW_AE_DDP_NO_L_BIT:
4310 i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4311 (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4312 break;
4313 case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
4314 case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
4315 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4316 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
4317 break;
4318 case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
4319 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4320 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4321 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
4322 break;
4323 case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
4324 if (is_tagged)
4325 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4326 (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
4327 else
4328 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4329 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
4330 break;
4331 case I40IW_AE_DDP_UBE_INVALID_MO:
4332 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4333 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
4334 break;
4335 case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
4336 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4337 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
4338 break;
4339 case I40IW_AE_DDP_UBE_INVALID_QN:
4340 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4341 (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4342 break;
4343 case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4344 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4345 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
4346 break;
4347 case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4348 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4349 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
4350 break;
4351 default:
4352 i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4353 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
4354 break;
4355 }
4356
4357 if (copy_len)
4358 memcpy(termhdr + 1, pkt, copy_len);
4359
4360	return sizeof(struct i40iw_terminate_hdr) + copy_len;
4361}
4362
4363/**
4364 * i40iw_terminate_send_fin() - Send fin for terminate message
4365 * @qp: qp associated with received terminate AE
4366 */
4367void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4368{
4369 /* Send the fin only */
4370 i40iw_term_modify_qp(qp,
4371 I40IW_QP_STATE_TERMINATE,
4372 I40IWQP_TERM_SEND_FIN_ONLY,
4373 0);
4374}
4375
4376/**
4377 * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
4378 * @qp: qp associated with received terminate AE
4379 * @info: the struct containing AE information
4380 */
4381void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4382{
4383 u8 termlen = 0;
4384
4385 if (qp->term_flags & I40IW_TERM_SENT)
4386 return; /* Sanity check */
4387
4388 /* Eventtype can change from bld_terminate_hdr */
4389 qp->eventtype = TERM_EVENT_QP_FATAL;
4390 termlen = i40iw_bld_terminate_hdr(qp, info);
4391 i40iw_terminate_start_timer(qp);
4392 qp->term_flags |= I40IW_TERM_SENT;
4393 i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
4394 I40IWQP_TERM_SEND_TERM_ONLY, termlen);
4395}
4396
4397/**
4398 * i40iw_terminate_received - handle terminate received AE
4399 * @qp: qp associated with received terminate AE
4400 * @info: the struct containing AE information
4401 */
4402void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4403{
4404 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4405	__be32 *mpa;
4406	u8 ddp_ctl;
4407 u8 rdma_ctl;
4408 u16 aeq_id = 0;
4409 struct i40iw_terminate_hdr *termhdr;
4410
4411	mpa = (__be32 *)i40iw_locate_mpa(pkt);
4412	if (info->q2_data_written) {
4413 /* did not validate the frame - do it now */
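		/* each failed check maps to the asynchronous event the
		 * hardware would have raised; a malformed terminate is
		 * answered with a terminate of our own
		 */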
4414 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
4415 rdma_ctl = ntohl(mpa[0]) & 0xff;
4416 if ((ddp_ctl & 0xc0) != 0x40)
4417 aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
4418 else if ((ddp_ctl & 0x03) != 1)
4419 aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
4420 else if (ntohl(mpa[2]) != 2)
4421 aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
4422 else if (ntohl(mpa[3]) != 1)
4423 aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
4424 else if (ntohl(mpa[4]) != 0)
4425 aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
4426 else if ((rdma_ctl & 0xc0) != 0x40)
4427 aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
4428
4429 info->ae_id = aeq_id;
4430 if (info->ae_id) {
4431 /* Bad terminate recvd - send back a terminate */
4432 i40iw_terminate_connection(qp, info);
4433 return;
4434 }
4435 }
4436
4437 qp->term_flags |= I40IW_TERM_RCVD;
4438 qp->eventtype = TERM_EVENT_QP_FATAL;
4439 termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
4440 if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
4441 termhdr->layer_etype == RDMAP_REMOTE_OP) {
4442 i40iw_terminate_done(qp, 0);
4443 } else {
4444 i40iw_terminate_start_timer(qp);
4445 i40iw_terminate_send_fin(qp);
4446 }
4447}
4448
4449/**
4450 * i40iw_hw_stat_init - Initialize HW stats table
4451 * @devstat: pestat struct
4452 * @fcn_idx: PCI fn id
4453 * @hw: PF i40iw_hw structure.
4454 * @is_pf: Is it a PF?
4455 *
4456 * Populate the HW stat table with the register offset address for each
4457 * stat and take an initial snapshot of all counter values.
4458 */
4459static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
4460 u8 fcn_idx,
4461 struct i40iw_hw *hw, bool is_pf)
4462{
4463 u32 stat_reg_offset;
4464 u32 stat_index;
4465 struct i40iw_dev_hw_stat_offsets *stat_table =
4466 &devstat->hw_stat_offsets;
4467 struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
4468
4469 devstat->hw = hw;
4470
4471 if (is_pf) {
4472 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4473 I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
4474 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4475 I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
4476 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4477 I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
4478 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4479 I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
4480 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4481 I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
4482 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4483 I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
4484 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4485 I40E_GLPES_PFTCPRTXSEG(fcn_idx);
4486 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4487 I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
4488 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4489 I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4490
4491 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4492 I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
4493 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4494 I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
4495 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4496 I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
4497 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4498 I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
4499 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4500 I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
4501 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4502 I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
4503 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4504 I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
4505 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4506 I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
4507 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4508 I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
4509 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4510 I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
4511 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4512 I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
4513 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4514 I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
4515 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4516 I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
4517 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4518 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4519		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
4520				I40E_GLPES_PFIP6TXMCPKTSLO(fcn_idx);
4521 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4522 I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
4523 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4524 I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
4525 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4526 I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
4527 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4528 I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
4529 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4530 I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
4531 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4532 I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
4533 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4534 I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
4535 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4536 I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
4537 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4538 I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
4539 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4540 I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
4541 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4542 I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4543 } else {
4544 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4545 I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
4546 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4547 I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
4548 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4549 I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
4550 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4551 I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
4552 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4553 I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
4554 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4555 I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
4556 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4557 I40E_GLPES_VFTCPRTXSEG(fcn_idx);
4558 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4559 I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
4560 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4561 I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4562
4563 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4564 I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
4565 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4566 I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
4567 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4568 I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
4569 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4570 I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
4571 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4572 I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
4573 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4574 I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
4575 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4576 I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
4577 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4578 I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
4579 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4580 I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
4581 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4582 I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
4583 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4584 I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
4585 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4586 I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
4587 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4588 I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
4589 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4590 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4591		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
4592				I40E_GLPES_VFIP6TXMCPKTSLO(fcn_idx);
4593 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4594 I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
4595 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4596 I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
4597 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4598 I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
4599 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4600 I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
4601 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4602 I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
4603 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4604 I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
4605 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4606 I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
4607 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4608 I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
4609 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4610 I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
4611 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4612 I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
4613 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4614 I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4615 }
4616
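	/* snapshot the current counter values so the first delta computed
	 * by the read helpers is taken against hardware state, not zero
	 */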
4617 for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
4618 stat_index++) {
4619 stat_reg_offset = stat_table->stat_offset_64[stat_index];
4620 last_rd_stats->stat_value_64[stat_index] =
4621 readq(devstat->hw->hw_addr + stat_reg_offset);
4622 }
4623
4624 for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
4625 stat_index++) {
4626 stat_reg_offset = stat_table->stat_offset_32[stat_index];
4627 last_rd_stats->stat_value_32[stat_index] =
4628 i40iw_rd32(devstat->hw, stat_reg_offset);
4629 }
4630}
4631
4632/**
4633 * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodate for roll-overs.
4634 * @devstat: pestat struct
4635 * @index: index in HW stat table which contains offset reg-addr
4636 * @value: hw stat value
4637 */
4638static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
4639 enum i40iw_hw_stat_index_32b index,
4640 u64 *value)
4641{
4642 struct i40iw_dev_hw_stat_offsets *stat_table =
4643 &devstat->hw_stat_offsets;
4644 struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
4645 struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
4646 u64 new_stat_value = 0;
4647 u32 stat_reg_offset = stat_table->stat_offset_32[index];
4648
4649 new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
4650	/* roll-over case: the counter wrapped since the last read */
4651 if (new_stat_value < last_rd_stats->stat_value_32[index])
4652 hw_stats->stat_value_32[index] += new_stat_value;
4653 else
4654 hw_stats->stat_value_32[index] +=
4655 new_stat_value - last_rd_stats->stat_value_32[index];
4656 last_rd_stats->stat_value_32[index] = new_stat_value;
4657 *value = hw_stats->stat_value_32[index];
4658}
4659
4660/**
4661 * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodate for roll-overs.
4662 * @devstat: pestat struct
4663 * @index: index in HW stat table which contains offset reg-addr
4664 * @value: hw stat value
4665 */
4666static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
4667 enum i40iw_hw_stat_index_64b index,
4668 u64 *value)
4669{
4670 struct i40iw_dev_hw_stat_offsets *stat_table =
4671 &devstat->hw_stat_offsets;
4672 struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
4673 struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
4674 u64 new_stat_value = 0;
4675 u32 stat_reg_offset = stat_table->stat_offset_64[index];
4676
4677 new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
4678	/* roll-over case: the counter wrapped since the last read */
4679 if (new_stat_value < last_rd_stats->stat_value_64[index])
4680 hw_stats->stat_value_64[index] += new_stat_value;
4681 else
4682 hw_stats->stat_value_64[index] +=
4683 new_stat_value - last_rd_stats->stat_value_64[index];
4684 last_rd_stats->stat_value_64[index] = new_stat_value;
4685 *value = hw_stats->stat_value_64[index];
4686}
4687
4688/**
4689 * i40iw_hw_stat_read_all - read all HW stat counters
4690 * @devstat: pestat struct
4691 * @stat_values: hw stats structure
4692 *
4693 * Read all the HW stat counters and populate the hw_stats structure
4694 * of the passed-in dev's pestat as well as the copy in stat_values.
4695 */
4696static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
4697 struct i40iw_dev_hw_stats *stat_values)
4698{
4699 u32 stat_index;
4700
4701 for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
4702 stat_index++)
4703 i40iw_hw_stat_read_32(devstat, stat_index,
4704 &stat_values->stat_value_32[stat_index]);
4705 for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
4706 stat_index++)
4707 i40iw_hw_stat_read_64(devstat, stat_index,
4708 &stat_values->stat_value_64[stat_index]);
4709}
4710
4711/**
4712 * i40iw_hw_stat_refresh_all - Update all HW stat structs
4713 * @devstat: pestat struct
4715 *
4716 * Read all the HW stat counters to refresh the values in the hw_stats
4717 * structure of the passed-in dev's pestat.
4718 */
4719static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
4720{
4721 u64 stat_value;
4722 u32 stat_index;
4723
4724 for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
4725 stat_index++)
4726 i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
4727 for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
4728 stat_index++)
4729 i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
4730}
4731
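/*
 * The ops tables below mostly use positional initializers, so entries must
 * remain in the order the corresponding ops structures declare their
 * function pointers.
 */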
4732static struct i40iw_cqp_ops iw_cqp_ops = {
4733 i40iw_sc_cqp_init,
4734 i40iw_sc_cqp_create,
4735 i40iw_sc_cqp_post_sq,
4736 i40iw_sc_cqp_get_next_send_wqe,
4737 i40iw_sc_cqp_destroy,
4738 i40iw_sc_poll_for_cqp_op_done
4739};
4740
4741static struct i40iw_ccq_ops iw_ccq_ops = {
4742 i40iw_sc_ccq_init,
4743 i40iw_sc_ccq_create,
4744 i40iw_sc_ccq_destroy,
4745 i40iw_sc_ccq_create_done,
4746 i40iw_sc_ccq_get_cqe_info,
4747 i40iw_sc_ccq_arm
4748};
4749
4750static struct i40iw_ceq_ops iw_ceq_ops = {
4751 i40iw_sc_ceq_init,
4752 i40iw_sc_ceq_create,
4753 i40iw_sc_cceq_create_done,
4754 i40iw_sc_cceq_destroy_done,
4755 i40iw_sc_cceq_create,
4756 i40iw_sc_ceq_destroy,
4757 i40iw_sc_process_ceq
4758};
4759
4760static struct i40iw_aeq_ops iw_aeq_ops = {
4761 i40iw_sc_aeq_init,
4762 i40iw_sc_aeq_create,
4763 i40iw_sc_aeq_destroy,
4764 i40iw_sc_get_next_aeqe,
4765 i40iw_sc_repost_aeq_entries,
4766 i40iw_sc_aeq_create_done,
4767 i40iw_sc_aeq_destroy_done
4768};
4769
4770/* iwarp pd ops */
4771static struct i40iw_pd_ops iw_pd_ops = {
4772 i40iw_sc_pd_init,
4773};
4774
4775static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
4776	.qp_init = i40iw_sc_qp_init,
4777 .qp_create = i40iw_sc_qp_create,
4778 .qp_modify = i40iw_sc_qp_modify,
4779 .qp_destroy = i40iw_sc_qp_destroy,
4780 .qp_flush_wqes = i40iw_sc_qp_flush_wqes,
4781 .qp_upload_context = i40iw_sc_qp_upload_context,
4782 .qp_setctx = i40iw_sc_qp_setctx,
4783 .qp_send_lsmm = i40iw_sc_send_lsmm,
4784 .qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
4785 .qp_send_rtt = i40iw_sc_send_rtt,
4786 .qp_post_wqe0 = i40iw_sc_post_wqe0,
4787 .iw_mr_fast_register = i40iw_sc_mr_fast_register
4788};
4789
4790static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
4791 i40iw_sc_cq_init,
4792 i40iw_sc_cq_create,
4793 i40iw_sc_cq_destroy,
4794 i40iw_sc_cq_modify,
4795};
4796
4797static struct i40iw_mr_ops iw_mr_ops = {
4798 i40iw_sc_alloc_stag,
4799 i40iw_sc_mr_reg_non_shared,
4800 i40iw_sc_mr_reg_shared,
4801 i40iw_sc_dealloc_stag,
4802 i40iw_sc_query_stag,
4803 i40iw_sc_mw_alloc
4804};
4805
4806static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
4807 i40iw_sc_manage_push_page,
4808 i40iw_sc_manage_hmc_pm_func_table,
4809 i40iw_sc_set_hmc_resource_profile,
4810 i40iw_sc_commit_fpm_values,
4811 i40iw_sc_query_fpm_values,
4812 i40iw_sc_static_hmc_pages_allocated,
4813 i40iw_sc_add_arp_cache_entry,
4814 i40iw_sc_del_arp_cache_entry,
4815 i40iw_sc_query_arp_cache_entry,
4816 i40iw_sc_manage_apbvt_entry,
4817 i40iw_sc_manage_qhash_table_entry,
4818 i40iw_sc_alloc_local_mac_ipaddr_entry,
4819 i40iw_sc_add_local_mac_ipaddr_entry,
4820 i40iw_sc_del_local_mac_ipaddr_entry,
4821 i40iw_sc_cqp_nop,
4822 i40iw_sc_commit_fpm_values_done,
4823 i40iw_sc_query_fpm_values_done,
4824 i40iw_sc_manage_hmc_pm_func_table_done,
4825 i40iw_sc_suspend_qp,
4826 i40iw_sc_resume_qp
4827};
4828
4829static struct i40iw_hmc_ops iw_hmc_ops = {
4830 i40iw_sc_init_iw_hmc,
4831 i40iw_sc_parse_fpm_query_buf,
4832 i40iw_sc_configure_iw_fpm,
4833 i40iw_sc_parse_fpm_commit_buf,
4834 i40iw_sc_create_hmc_obj,
4835 i40iw_sc_del_hmc_obj,
4836 NULL,
4837 NULL
4838};
4839
4840static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
4841 i40iw_hw_stat_init,
4842 i40iw_hw_stat_read_32,
4843 i40iw_hw_stat_read_64,
4844 i40iw_hw_stat_read_all,
4845 i40iw_hw_stat_refresh_all
4846};
4847
4848/**
4849 * i40iw_device_init_pestat - Initialize the pestat structure
4850 * @devstat: pestat struct
4851 */
4852void i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
4853{
4854 devstat->ops = iw_device_pestat_ops;
4855}
4856
4857/**
4858 * i40iw_device_init - Initialize IWARP device
4859 * @dev: IWARP device pointer
4860 * @info: IWARP init info
4861 */
4862enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
4863 struct i40iw_device_init_info *info)
4864{
4865 u32 val;
4866 u32 vchnl_ver = 0;
4867 u16 hmc_fcn = 0;
4868 enum i40iw_status_code ret_code = 0;
4869 u8 db_size;
4870	int i;
4871
4872 spin_lock_init(&dev->cqp_lock);
4873 INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
4874
4875 i40iw_device_init_uk(&dev->dev_uk);
4876
4877 dev->debug_mask = info->debug_mask;
4878
4879	i40iw_device_init_pestat(&dev->dev_pestat);
4880	dev->hmc_fn_id = info->hmc_fn_id;
4881	i40iw_fill_qos_list(info->l2params.qs_handle_list);
4882 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
4883 dev->qos[i].qs_handle = info->l2params.qs_handle_list[i];
4884 i40iw_debug(dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, dev->qos[i].qs_handle);
4885 spin_lock_init(&dev->qos[i].lock);
4886 INIT_LIST_HEAD(&dev->qos[i].qplist);
4887 }
4888	dev->exception_lan_queue = info->exception_lan_queue;
4889 dev->is_pf = info->is_pf;
4890
4891 dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
4892 dev->fpm_query_buf = info->fpm_query_buf;
4893
4894 dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
4895 dev->fpm_commit_buf = info->fpm_commit_buf;
4896
4897 dev->hw = info->hw;
4898 dev->hw->hw_addr = info->bar0;
4899
4900 val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
4901 dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
4902
4903 if (dev->is_pf) {
4904 dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
4905 dev->hmc_fn_id, dev->hw, true);
4906 spin_lock_init(&dev->dev_pestat.stats_lock);
4907		/* start the periodic stats timer */
4908 i40iw_hw_stats_start_timer(dev);
4909 val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
4910 db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
4911 if ((db_size != I40IW_PE_DB_SIZE_4M) &&
4912 (db_size != I40IW_PE_DB_SIZE_8M)) {
4913 i40iw_debug(dev, I40IW_DEBUG_DEV,
4914 "%s: PE doorbell is not enabled in CSR val 0x%x\n",
4915 __func__, val);
4916 ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
4917 return ret_code;
4918 }
4919 dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
4920 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
4921 } else {
4922 dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
4923 }
4924
4925 dev->cqp_ops = &iw_cqp_ops;
4926 dev->ccq_ops = &iw_ccq_ops;
4927 dev->ceq_ops = &iw_ceq_ops;
4928 dev->aeq_ops = &iw_aeq_ops;
4929 dev->cqp_misc_ops = &iw_cqp_misc_ops;
4930 dev->iw_pd_ops = &iw_pd_ops;
4931 dev->iw_priv_qp_ops = &iw_priv_qp_ops;
4932 dev->iw_priv_cq_ops = &iw_priv_cq_ops;
4933 dev->mr_ops = &iw_mr_ops;
4934 dev->hmc_ops = &iw_hmc_ops;
4935 dev->vchnl_if.vchnl_send = info->vchnl_send;
4936 if (dev->vchnl_if.vchnl_send)
4937 dev->vchnl_up = true;
4938 else
4939 dev->vchnl_up = false;
4940 if (!dev->is_pf) {
4941 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
4942 ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
4943 if (!ret_code) {
4944 i40iw_debug(dev, I40IW_DEBUG_DEV,
4945 "%s: Get Channel version rc = 0x%0x, version is %u\n",
4946 __func__, ret_code, vchnl_ver);
4947 ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
4948 if (!ret_code) {
4949 i40iw_debug(dev, I40IW_DEBUG_DEV,
4950 "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
4951 __func__, ret_code, hmc_fcn);
4952 dev->hmc_fn_id = (u8)hmc_fcn;
4953 }
4954 }
4955 }
4956 dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
4957
4958 return ret_code;
4959}