blob: c8a40b3fbc3113c4b3101e1aadef53948799ab2f [file] [log] [blame]
Faisal Latif86dbcd02016-01-20 13:40:10 -06001/*******************************************************************************
2*
3* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4*
5* This software is available to you under a choice of one of two
6* licenses. You may choose to be licensed under the terms of the GNU
7* General Public License (GPL) Version 2, available from the file
8* COPYING in the main directory of this source tree, or the
9* OpenFabrics.org BSD license below:
10*
11* Redistribution and use in source and binary forms, with or
12* without modification, are permitted provided that the following
13* conditions are met:
14*
15* - Redistributions of source code must retain the above
16* copyright notice, this list of conditions and the following
17* disclaimer.
18*
19* - Redistributions in binary form must reproduce the above
20* copyright notice, this list of conditions and the following
21* disclaimer in the documentation and/or other materials
22* provided with the distribution.
23*
24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31* SOFTWARE.
32*
33*******************************************************************************/
34
35#include "i40iw_osdep.h"
36#include "i40iw_register.h"
37#include "i40iw_status.h"
38#include "i40iw_hmc.h"
39
40#include "i40iw_d.h"
41#include "i40iw_type.h"
42#include "i40iw_p.h"
43#include "i40iw_vf.h"
44#include "i40iw_virtchnl.h"
45
46/**
47 * i40iw_insert_wqe_hdr - write wqe header
48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe
50 */
Mustafa Ismail43bfc242017-10-03 11:11:49 -050051void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
Faisal Latif86dbcd02016-01-20 13:40:10 -060052{
53 wmb(); /* make sure WQE is populated before polarity is set */
54 set_64bit_val(wqe, 24, header);
55}
56
Shiraz Saleemd26875b2017-08-08 20:38:45 -050057void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
58{
59 if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
60 cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
61 cqp_timeout->count = 0;
62 } else {
63 if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
64 cqp_timeout->count++;
65 }
66}
67
Faisal Latif86dbcd02016-01-20 13:40:10 -060068/**
69 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
70 * @cqp: struct for cqp hw
71 * @val: cqp tail register value
72 * @tail:wqtail register value
73 * @error: cqp processing err
74 */
75static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
76 u32 *val,
77 u32 *tail,
78 u32 *error)
79{
80 if (cqp->dev->is_pf) {
81 *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
82 *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
83 *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
84 } else {
85 *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
86 *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
87 *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
88 }
89}
90
91/**
92 * i40iw_cqp_poll_registers - poll cqp registers
93 * @cqp: struct for cqp hw
94 * @tail:wqtail register value
95 * @count: how many times to try for completion
96 */
97static enum i40iw_status_code i40iw_cqp_poll_registers(
98 struct i40iw_sc_cqp *cqp,
99 u32 tail,
100 u32 count)
101{
102 u32 i = 0;
103 u32 newtail, error, val;
104
105 while (i < count) {
106 i++;
107 i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
108 if (error) {
109 error = (cqp->dev->is_pf) ?
110 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
111 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
112 return I40IW_ERR_CQP_COMPL_ERROR;
113 }
114 if (newtail != tail) {
115 /* SUCCESS */
116 I40IW_RING_MOVE_TAIL(cqp->sq_ring);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600117 cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600118 return 0;
119 }
120 udelay(I40IW_SLEEP_COUNT);
121 }
122 return I40IW_ERR_TIMEOUT;
123}
124
125/**
126 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
127 * @buf: ptr to fpm commit buffer
128 * @info: ptr to i40iw_hmc_obj_info struct
Ismail, Mustafafa415372016-04-18 10:33:08 -0500129 * @sd: number of SDs for HMC objects
Faisal Latif86dbcd02016-01-20 13:40:10 -0600130 *
131 * parses fpm commit info and copy base value
132 * of hmc objects in hmc_info
133 */
134static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
135 u64 *buf,
Ismail, Mustafafa415372016-04-18 10:33:08 -0500136 struct i40iw_hmc_obj_info *info,
137 u32 *sd)
Faisal Latif86dbcd02016-01-20 13:40:10 -0600138{
139 u64 temp;
Ismail, Mustafafa415372016-04-18 10:33:08 -0500140 u64 size;
141 u64 base = 0;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600142 u32 i, j;
Ismail, Mustafafa415372016-04-18 10:33:08 -0500143 u32 k = 0;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600144
145 /* copy base values in obj_info */
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500146 for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
147 if ((i == I40IW_HMC_IW_SRQ) ||
148 (i == I40IW_HMC_IW_FSIMC) ||
149 (i == I40IW_HMC_IW_FSIAV)) {
150 info[i].base = 0;
151 info[i].cnt = 0;
152 continue;
153 }
Faisal Latif86dbcd02016-01-20 13:40:10 -0600154 get_64bit_val(buf, j, &temp);
155 info[i].base = RS_64_1(temp, 32) * 512;
Ismail, Mustafafa415372016-04-18 10:33:08 -0500156 if (info[i].base > base) {
157 base = info[i].base;
158 k = i;
159 }
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500160 if (i == I40IW_HMC_IW_APBVT_ENTRY) {
161 info[i].cnt = 1;
162 continue;
163 }
164 if (i == I40IW_HMC_IW_QP)
165 info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
166 else if (i == I40IW_HMC_IW_CQ)
167 info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
168 else
169 info[i].cnt = (u32)(temp);
Faisal Latif86dbcd02016-01-20 13:40:10 -0600170 }
Ismail, Mustafafa415372016-04-18 10:33:08 -0500171 size = info[k].cnt * info[k].size + info[k].base;
172 if (size & 0x1FFFFF)
173 *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
174 else
175 *sd = (u32)(size >> 21);
176
Faisal Latif86dbcd02016-01-20 13:40:10 -0600177 return 0;
178}
179
180/**
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500181 * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
182 * @buf: ptr to fpm query buffer
183 * @buf_idx: index into buf
184 * @info: ptr to i40iw_hmc_obj_info struct
185 * @rsrc_idx: resource index into info
186 *
187 * Decode a 64 bit value from fpm query buffer into max count and size
188 */
189static u64 i40iw_sc_decode_fpm_query(u64 *buf,
190 u32 buf_idx,
191 struct i40iw_hmc_obj_info *obj_info,
192 u32 rsrc_idx)
193{
194 u64 temp;
195 u32 size;
196
197 get_64bit_val(buf, buf_idx, &temp);
198 obj_info[rsrc_idx].max_cnt = (u32)temp;
199 size = (u32)RS_64_1(temp, 32);
200 obj_info[rsrc_idx].size = LS_64_1(1, size);
201
202 return temp;
203}
204
205/**
Faisal Latif86dbcd02016-01-20 13:40:10 -0600206 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
207 * @buf: ptr to fpm query buffer
208 * @info: ptr to i40iw_hmc_obj_info struct
209 * @hmc_fpm_misc: ptr to fpm data
210 *
211 * parses fpm query buffer and copy max_cnt and
212 * size value of hmc objects in hmc_info
213 */
214static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
215 u64 *buf,
216 struct i40iw_hmc_info *hmc_info,
217 struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
218{
Faisal Latif86dbcd02016-01-20 13:40:10 -0600219 struct i40iw_hmc_obj_info *obj_info;
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500220 u64 temp;
221 u32 size;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600222 u16 max_pe_sds;
223
224 obj_info = hmc_info->hmc_obj;
225
226 get_64bit_val(buf, 0, &temp);
227 hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
228 max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);
229
230 /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
231 if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
232 max_pe_sds--;
233 hmc_fpm_misc->max_sds = max_pe_sds;
234 hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
235
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500236 get_64bit_val(buf, 8, &temp);
237 obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
238 size = (u32)RS_64_1(temp, 32);
239 obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
Faisal Latif86dbcd02016-01-20 13:40:10 -0600240
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500241 get_64bit_val(buf, 16, &temp);
242 obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
243 size = (u32)RS_64_1(temp, 32);
244 obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
Faisal Latif86dbcd02016-01-20 13:40:10 -0600245
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500246 i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
247 i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
248
249 obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
250 obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
251
252 i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
253 i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
254
Faisal Latif86dbcd02016-01-20 13:40:10 -0600255 get_64bit_val(buf, 64, &temp);
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500256 obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
257 obj_info[I40IW_HMC_IW_XFFL].size = 4;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600258 hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
259 if (!hmc_fpm_misc->xf_block_size)
260 return I40IW_ERR_INVALID_SIZE;
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500261
262 i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
263
Faisal Latif86dbcd02016-01-20 13:40:10 -0600264 get_64bit_val(buf, 80, &temp);
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500265 obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
266 obj_info[I40IW_HMC_IW_Q1FL].size = 4;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600267 hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
268 if (!hmc_fpm_misc->q1_block_size)
269 return I40IW_ERR_INVALID_SIZE;
Chien Tin Tungf67ace22017-08-08 20:38:43 -0500270
271 i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
272
273 get_64bit_val(buf, 112, &temp);
274 obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
275 obj_info[I40IW_HMC_IW_PBLE].size = 8;
276
277 get_64bit_val(buf, 120, &temp);
278 hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
279 hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
280 hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
281
Faisal Latif86dbcd02016-01-20 13:40:10 -0600282 return 0;
283}
284
285/**
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500286 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
287 * @qs_list: list of qs_handles to be fixed with valid qs_handles
288 */
289static void i40iw_fill_qos_list(u16 *qs_list)
290{
291 u16 qshandle = qs_list[0];
292 int i;
293
294 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
295 if (qs_list[i] == QS_HANDLE_UNKNOWN)
296 qs_list[i] = qshandle;
297 else
298 qshandle = qs_list[i];
299 }
300}
301
302/**
303 * i40iw_qp_from_entry - Given entry, get to the qp structure
304 * @entry: Points to list of qp structure
305 */
306static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
307{
308 if (!entry)
309 return NULL;
310
311 return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
312}
313
314/**
315 * i40iw_get_qp - get the next qp from the list given current qp
316 * @head: Listhead of qp's
317 * @qp: current qp
318 */
319static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
320{
321 struct list_head *entry = NULL;
322 struct list_head *lastentry;
323
324 if (list_empty(head))
325 return NULL;
326
327 if (!qp) {
328 entry = head->next;
329 } else {
330 lastentry = &qp->list;
331 entry = (lastentry != head) ? lastentry->next : NULL;
332 }
333
334 return i40iw_qp_from_entry(entry);
335}
336
337/**
338 * i40iw_change_l2params - given the new l2 parameters, change all qp
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600339 * @vsi: pointer to the vsi structure
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500340 * @l2params: New paramaters from l2
341 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600342void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500343{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600344 struct i40iw_sc_dev *dev = vsi->dev;
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500345 struct i40iw_sc_qp *qp = NULL;
346 bool qs_handle_change = false;
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500347 unsigned long flags;
348 u16 qs_handle;
349 int i;
350
Shiraz Saleem5b4a1a82017-10-16 15:46:01 -0500351 if (vsi->mtu != l2params->mtu) {
352 vsi->mtu = l2params->mtu;
353 i40iw_reinitialize_ieq(dev);
354 }
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500355
356 i40iw_fill_qos_list(l2params->qs_handle_list);
357 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
358 qs_handle = l2params->qs_handle_list[i];
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600359 if (vsi->qos[i].qs_handle != qs_handle)
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500360 qs_handle_change = true;
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600361 spin_lock_irqsave(&vsi->qos[i].lock, flags);
362 qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500363 while (qp) {
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500364 if (qs_handle_change) {
365 qp->qs_handle = qs_handle;
366 /* issue cqp suspend command */
367 i40iw_qp_suspend_resume(dev, qp, true);
368 }
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600369 qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500370 }
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600371 spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
372 vsi->qos[i].qs_handle = qs_handle;
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500373 }
374}
375
376/**
377 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500378 * @qp: qp to be removed from qos
379 */
Ivan Barreraf535b562017-10-16 15:46:04 -0500380void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500381{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600382 struct i40iw_sc_vsi *vsi = qp->vsi;
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500383 unsigned long flags;
384
385 if (!qp->on_qoslist)
386 return;
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600387 spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500388 list_del(&qp->list);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600389 spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500390}
391
392/**
393 * i40iw_qp_add_qos - called during setctx fot qp to be added to qos
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500394 * @qp: qp to be added to qos
395 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600396void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500397{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600398 struct i40iw_sc_vsi *vsi = qp->vsi;
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500399 unsigned long flags;
400
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600401 if (qp->on_qoslist)
402 return;
403 spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
404 qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
405 list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500406 qp->on_qoslist = true;
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600407 spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
Henry Orosco0fc2dc52016-10-10 21:12:10 -0500408}
409
410/**
Faisal Latif86dbcd02016-01-20 13:40:10 -0600411 * i40iw_sc_pd_init - initialize sc pd struct
412 * @dev: sc device struct
413 * @pd: sc pd ptr
414 * @pd_id: pd_id for allocated pd
Chien Tin Tung61f51b72016-12-21 08:53:46 -0600415 * @abi_ver: ABI version from user context, -1 if not valid
Faisal Latif86dbcd02016-01-20 13:40:10 -0600416 */
417static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
418 struct i40iw_sc_pd *pd,
Chien Tin Tung61f51b72016-12-21 08:53:46 -0600419 u16 pd_id,
420 int abi_ver)
Faisal Latif86dbcd02016-01-20 13:40:10 -0600421{
422 pd->size = sizeof(*pd);
423 pd->pd_id = pd_id;
Chien Tin Tung61f51b72016-12-21 08:53:46 -0600424 pd->abi_ver = abi_ver;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600425 pd->dev = dev;
426}
427
428/**
429 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
430 * @wqsize: size of the wq (sq, rq, srq) to encoded_size
431 * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
432 */
433u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
434{
435 u8 encoded_size = 0;
436
437 /* cqp sq's hw coded value starts from 1 for size of 4
438 * while it starts from 0 for qp' wq's.
439 */
440 if (cqpsq)
441 encoded_size = 1;
442 wqsize >>= 2;
443 while (wqsize >>= 1)
444 encoded_size++;
445 return encoded_size;
446}
447
448/**
449 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
450 * @cqp: IWARP control queue pair pointer
451 * @info: IWARP control queue pair init info pointer
452 *
453 * Initializes the object and context buffers for a control Queue Pair.
454 */
455static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
456 struct i40iw_cqp_init_info *info)
457{
458 u8 hw_sq_size;
459
460 if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
461 (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
462 ((info->sq_size & (info->sq_size - 1))))
463 return I40IW_ERR_INVALID_SIZE;
464
465 hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
466 cqp->size = sizeof(*cqp);
467 cqp->sq_size = info->sq_size;
468 cqp->hw_sq_size = hw_sq_size;
469 cqp->sq_base = info->sq;
470 cqp->host_ctx = info->host_ctx;
471 cqp->sq_pa = info->sq_pa;
472 cqp->host_ctx_pa = info->host_ctx_pa;
473 cqp->dev = info->dev;
474 cqp->struct_ver = info->struct_ver;
475 cqp->scratch_array = info->scratch_array;
476 cqp->polarity = 0;
477 cqp->en_datacenter_tcp = info->en_datacenter_tcp;
478 cqp->enabled_vf_count = info->enabled_vf_count;
479 cqp->hmc_profile = info->hmc_profile;
480 info->dev->cqp = cqp;
481
482 I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600483 cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
484 cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
485
Christopher Bednarz56b2f522017-10-16 15:46:03 -0500486 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
487 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
488
Faisal Latif86dbcd02016-01-20 13:40:10 -0600489 i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
490 "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
491 __func__, cqp->sq_size, cqp->hw_sq_size,
492 cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
493 return 0;
494}
495
496/**
497 * i40iw_sc_cqp_create - create cqp during bringup
498 * @cqp: struct for cqp hw
Faisal Latif86dbcd02016-01-20 13:40:10 -0600499 * @maj_err: If error, major err number
500 * @min_err: If error, minor err number
501 */
502static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
Faisal Latif86dbcd02016-01-20 13:40:10 -0600503 u16 *maj_err,
504 u16 *min_err)
505{
506 u64 temp;
507 u32 cnt = 0, p1, p2, val = 0, err_code;
508 enum i40iw_status_code ret_code;
509
Shiraz Saleem3f9fade2017-01-18 11:48:29 -0600510 *maj_err = 0;
511 *min_err = 0;
512
Faisal Latif86dbcd02016-01-20 13:40:10 -0600513 ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
514 &cqp->sdbuf,
515 128,
516 I40IW_SD_BUF_ALIGNMENT);
517
518 if (ret_code)
519 goto exit;
520
521 temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
522 LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
523
Faisal Latif86dbcd02016-01-20 13:40:10 -0600524 set_64bit_val(cqp->host_ctx, 0, temp);
525 set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
526 temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
527 LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
528 set_64bit_val(cqp->host_ctx, 16, temp);
529 set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
530 set_64bit_val(cqp->host_ctx, 32, 0);
531 set_64bit_val(cqp->host_ctx, 40, 0);
532 set_64bit_val(cqp->host_ctx, 48, 0);
533 set_64bit_val(cqp->host_ctx, 56, 0);
534
535 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
536 cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
537
538 p1 = RS_32_1(cqp->host_ctx_pa, 32);
539 p2 = (u32)cqp->host_ctx_pa;
540
541 if (cqp->dev->is_pf) {
542 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
543 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
544 } else {
545 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
546 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
547 }
548 do {
549 if (cnt++ > I40IW_DONE_COUNT) {
550 i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
551 ret_code = I40IW_ERR_TIMEOUT;
552 /*
553 * read PFPE_CQPERRORCODES register to get the minor
554 * and major error code
555 */
556 if (cqp->dev->is_pf)
557 err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
558 else
559 err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
560 *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
561 *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
562 goto exit;
563 }
564 udelay(I40IW_SLEEP_COUNT);
565 if (cqp->dev->is_pf)
566 val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
567 else
568 val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
569 } while (!val);
570
571exit:
572 if (!ret_code)
573 cqp->process_cqp_sds = i40iw_update_sds_noccq;
574 return ret_code;
575}
576
577/**
578 * i40iw_sc_cqp_post_sq - post of cqp's sq
579 * @cqp: struct for cqp hw
580 */
581void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
582{
583 if (cqp->dev->is_pf)
584 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
585 else
586 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
587
588 i40iw_debug(cqp->dev,
589 I40IW_DEBUG_WQE,
590 "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
591 __func__,
592 cqp->sq_ring.head,
593 cqp->sq_ring.tail,
594 cqp->sq_ring.size);
595}
596
597/**
598 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
599 * @cqp: struct for cqp hw
600 * @wqe_idx: we index of cqp ring
601 */
602u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
603{
604 u64 *wqe = NULL;
605 u32 wqe_idx;
606 enum i40iw_status_code ret_code;
607
608 if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
609 i40iw_debug(cqp->dev,
610 I40IW_DEBUG_WQE,
611 "%s: ring is full head %x tail %x size %x\n",
612 __func__,
613 cqp->sq_ring.head,
614 cqp->sq_ring.tail,
615 cqp->sq_ring.size);
616 return NULL;
617 }
618 I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600619 cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
Faisal Latif86dbcd02016-01-20 13:40:10 -0600620 if (ret_code)
621 return NULL;
622 if (!wqe_idx)
623 cqp->polarity = !cqp->polarity;
624
625 wqe = cqp->sq_base[wqe_idx].elem;
626 cqp->scratch_array[wqe_idx] = scratch;
627 I40IW_CQP_INIT_WQE(wqe);
628
629 return wqe;
630}
631
632/**
633 * i40iw_sc_cqp_destroy - destroy cqp during close
634 * @cqp: struct for cqp hw
635 */
636static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
637{
638 u32 cnt = 0, val = 1;
639 enum i40iw_status_code ret_code = 0;
640 u32 cqpstat_addr;
641
642 if (cqp->dev->is_pf) {
643 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
644 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
645 cqpstat_addr = I40E_PFPE_CCQPSTATUS;
646 } else {
647 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
648 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
649 cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
650 }
651 do {
652 if (cnt++ > I40IW_DONE_COUNT) {
653 ret_code = I40IW_ERR_TIMEOUT;
654 break;
655 }
656 udelay(I40IW_SLEEP_COUNT);
657 val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
658 } while (val);
659
660 i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
661 return ret_code;
662}
663
664/**
665 * i40iw_sc_ccq_arm - enable intr for control cq
666 * @ccq: ccq sc struct
667 */
668static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
669{
670 u64 temp_val;
671 u16 sw_cq_sel;
672 u8 arm_next_se;
673 u8 arm_seq_num;
674
675 /* write to cq doorbell shadow area */
676 /* arm next se should always be zero */
677 get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
678
679 sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
680 arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
681
682 arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
683 arm_seq_num++;
684
685 temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
686 LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
687 LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
688 LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
689
690 set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
691
692 wmb(); /* make sure shadow area is updated before arming */
693
694 if (ccq->dev->is_pf)
695 i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
696 else
697 i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
698}
699
700/**
701 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
702 * @ccq: ccq sc struct
703 * @info: completion q entry to return
704 */
705static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
706 struct i40iw_sc_cq *ccq,
707 struct i40iw_ccq_cqe_info *info)
708{
709 u64 qp_ctx, temp, temp1;
710 u64 *cqe;
711 struct i40iw_sc_cqp *cqp;
712 u32 wqe_idx;
713 u8 polarity;
714 enum i40iw_status_code ret_code = 0;
715
716 if (ccq->cq_uk.avoid_mem_cflct)
717 cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
718 else
719 cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
720
721 get_64bit_val(cqe, 24, &temp);
722 polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
723 if (polarity != ccq->cq_uk.polarity)
724 return I40IW_ERR_QUEUE_EMPTY;
725
726 get_64bit_val(cqe, 8, &qp_ctx);
727 cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
728 info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
729 info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
730 if (info->error) {
731 info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
732 info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
733 }
734 wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
735 info->scratch = cqp->scratch_array[wqe_idx];
736
737 get_64bit_val(cqe, 16, &temp1);
738 info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
739 get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
740 info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
741 info->cqp = cqp;
742
743 /* move the head for cq */
744 I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
745 if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
746 ccq->cq_uk.polarity ^= 1;
747
748 /* update cq tail in cq shadow memory also */
749 I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
750 set_64bit_val(ccq->cq_uk.shadow_area,
751 0,
752 I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
753 wmb(); /* write shadow area before tail */
754 I40IW_RING_MOVE_TAIL(cqp->sq_ring);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -0600755 ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
756
Faisal Latif86dbcd02016-01-20 13:40:10 -0600757 return ret_code;
758}
759
760/**
761 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
762 * @cqp: struct for cqp hw
763 * @op_code: cqp opcode for completion
764 * @info: completion q entry to return
765 */
766static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
767 struct i40iw_sc_cqp *cqp,
768 u8 op_code,
769 struct i40iw_ccq_cqe_info *compl_info)
770{
771 struct i40iw_ccq_cqe_info info;
772 struct i40iw_sc_cq *ccq;
773 enum i40iw_status_code ret_code = 0;
774 u32 cnt = 0;
775
776 memset(&info, 0, sizeof(info));
777 ccq = cqp->dev->ccq;
778 while (1) {
779 if (cnt++ > I40IW_DONE_COUNT)
780 return I40IW_ERR_TIMEOUT;
781
782 if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
783 udelay(I40IW_SLEEP_COUNT);
784 continue;
785 }
786
787 if (info.error) {
788 ret_code = I40IW_ERR_CQP_COMPL_ERROR;
789 break;
790 }
791 /* check if opcode is cq create */
792 if (op_code != info.op_code) {
793 i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
794 "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
795 __func__, op_code, info.op_code);
796 }
797 /* success, exit out of the loop */
798 if (op_code == info.op_code)
799 break;
800 }
801
802 if (compl_info)
803 memcpy(compl_info, &info, sizeof(*compl_info));
804
805 return ret_code;
806}
807
808/**
809 * i40iw_sc_manage_push_page - Handle push page
810 * @cqp: struct for cqp hw
811 * @info: push page info
812 * @scratch: u64 saved to be used during cqp completion
813 * @post_sq: flag for cqp db to ring
814 */
815static enum i40iw_status_code i40iw_sc_manage_push_page(
816 struct i40iw_sc_cqp *cqp,
817 struct i40iw_cqp_manage_push_page_info *info,
818 u64 scratch,
819 bool post_sq)
820{
821 u64 *wqe;
822 u64 header;
823
824 if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
825 return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
826
827 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
828 if (!wqe)
829 return I40IW_ERR_RING_FULL;
830
831 set_64bit_val(wqe, 16, info->qs_handle);
832
833 header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
834 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
835 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
836 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
837
838 i40iw_insert_wqe_hdr(wqe, header);
839
840 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
841 wqe, I40IW_CQP_WQE_SIZE * 8);
842
843 if (post_sq)
844 i40iw_sc_cqp_post_sq(cqp);
845 return 0;
846}
847
848/**
849 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
850 * @cqp: struct for cqp hw
851 * @scratch: u64 saved to be used during cqp completion
852 * @vf_index: vf index for cqp
853 * @free_pm_fcn: function number
854 * @post_sq: flag for cqp db to ring
855 */
856static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
857 struct i40iw_sc_cqp *cqp,
858 u64 scratch,
859 u8 vf_index,
860 bool free_pm_fcn,
861 bool post_sq)
862{
863 u64 *wqe;
864 u64 header;
865
866 if (vf_index >= I40IW_MAX_VF_PER_PF)
867 return I40IW_ERR_INVALID_VF_ID;
868 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
869 if (!wqe)
870 return I40IW_ERR_RING_FULL;
871
872 header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
873 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
874 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
875 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
876
877 i40iw_insert_wqe_hdr(wqe, header);
878 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
879 wqe, I40IW_CQP_WQE_SIZE * 8);
880 if (post_sq)
881 i40iw_sc_cqp_post_sq(cqp);
882 return 0;
883}
884
885/**
886 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
887 * @cqp: struct for cqp hw
888 * @scratch: u64 saved to be used during cqp completion
889 * @hmc_profile_type: type of profile to set
890 * @vf_num: vf number for profile
891 * @post_sq: flag for cqp db to ring
892 * @poll_registers: flag to poll register for cqp completion
893 */
894static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
895 struct i40iw_sc_cqp *cqp,
896 u64 scratch,
897 u8 hmc_profile_type,
898 u8 vf_num, bool post_sq,
899 bool poll_registers)
900{
901 u64 *wqe;
902 u64 header;
903 u32 val, tail, error;
904 enum i40iw_status_code ret_code = 0;
905
906 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
907 if (!wqe)
908 return I40IW_ERR_RING_FULL;
909
910 set_64bit_val(wqe, 16,
911 (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
912 LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
913
914 header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
915 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
916
917 i40iw_insert_wqe_hdr(wqe, header);
918
919 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
920 wqe, I40IW_CQP_WQE_SIZE * 8);
921
922 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
923 if (error)
924 return I40IW_ERR_CQP_COMPL_ERROR;
925
926 if (post_sq) {
927 i40iw_sc_cqp_post_sq(cqp);
928 if (poll_registers)
929 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
930 else
931 ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
932 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
933 NULL);
934 }
935
936 return ret_code;
937}
938
939/**
940 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
941 * @cqp: struct for cqp hw
942 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
	/* block until the MANAGE_HMC_PM_FUNC_TABLE op completes on the ccq */
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}
947
948/**
949 * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit
950 * @cqp: struct for cqp hw
951 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	/* block until the COMMIT_FPM_VALUES op completes on the ccq */
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}
956
957/**
958 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
959 * @cqp: struct for cqp hw
960 * @scratch: u64 saved to be used during cqp completion
961 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: memory for fpm values
963 * @post_sq: flag for cqp db to ring
964 * @wait_type: poll ccq or cqp registers for cqp completion
965 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					struct i40iw_dma_mem *commit_fpm_mem,
					bool post_sq,
					u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* qw2: hmc function whose fpm values are committed,
	 * qw4: dma address of the buffer holding those values
	 */
	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, commit_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* capture current tail BEFORE posting so register polling can
	 * detect consumption of this wqe; bail if hw reports an error
	 */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);

		/* wait_type selects register polling vs ccq completion */
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_commit_fpm_values_done(cqp);
	}

	return ret_code;
}
1009
1010/**
1011 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
1012 * @cqp: struct for cqp hw
1013 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	/* block until the QUERY_FPM_VALUES op completes on the ccq */
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}
1018
1019/**
1020 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
1021 * @cqp: struct for cqp hw
1022 * @scratch: u64 saved to be used during cqp completion
1023 * @hmc_fn_id: hmc function id
1024 * @query_fpm_mem: memory for return fpm values
1025 * @post_sq: flag for cqp db to ring
1026 * @wait_type: poll ccq or cqp registers for cqp completion
1027 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					struct i40iw_dma_mem *query_fpm_mem,
					bool post_sq,
					u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* qw2: hmc function to query, qw4: dma buffer hw fills with
	 * the function's fpm values
	 */
	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, query_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* read the tail from CQP_TAIL register */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		/* wait_type selects register polling vs ccq completion */
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_query_fpm_values_done(cqp);
	}

	return ret_code;
}
1072
1073/**
1074 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
1075 * @cqp: struct for cqp hw
1076 * @info: arp entry information
1077 * @scratch: u64 saved to be used during cqp completion
1078 * @post_sq: flag for cqp db to ring
1079 */
1080static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
1081 struct i40iw_sc_cqp *cqp,
1082 struct i40iw_add_arp_cache_entry_info *info,
1083 u64 scratch,
1084 bool post_sq)
1085{
1086 u64 *wqe;
1087 u64 temp, header;
1088
1089 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1090 if (!wqe)
1091 return I40IW_ERR_RING_FULL;
1092 set_64bit_val(wqe, 8, info->reach_max);
1093
1094 temp = info->mac_addr[5] |
1095 LS_64_1(info->mac_addr[4], 8) |
1096 LS_64_1(info->mac_addr[3], 16) |
1097 LS_64_1(info->mac_addr[2], 24) |
1098 LS_64_1(info->mac_addr[1], 32) |
1099 LS_64_1(info->mac_addr[0], 40);
1100
1101 set_64bit_val(wqe, 16, temp);
1102
1103 header = info->arp_index |
1104 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1105 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
1106 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
1107 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1108
1109 i40iw_insert_wqe_hdr(wqe, header);
1110
1111 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
1112 wqe, I40IW_CQP_WQE_SIZE * 8);
1113
1114 if (post_sq)
1115 i40iw_sc_cqp_post_sq(cqp);
1116 return 0;
1117}
1118
1119/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
1121 * @cqp: struct for cqp hw
1122 * @scratch: u64 saved to be used during cqp completion
1123 * @arp_index: arp index to delete arp entry
1124 * @post_sq: flag for cqp db to ring
1125 */
1126static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
1127 struct i40iw_sc_cqp *cqp,
1128 u64 scratch,
1129 u16 arp_index,
1130 bool post_sq)
1131{
1132 u64 *wqe;
1133 u64 header;
1134
1135 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1136 if (!wqe)
1137 return I40IW_ERR_RING_FULL;
1138
1139 header = arp_index |
1140 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1141 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1142 i40iw_insert_wqe_hdr(wqe, header);
1143
1144 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
1145 wqe, I40IW_CQP_WQE_SIZE * 8);
1146
1147 if (post_sq)
1148 i40iw_sc_cqp_post_sq(cqp);
1149 return 0;
1150}
1151
1152/**
1153 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
1154 * @cqp: struct for cqp hw
1155 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of entry to query
1157 * @post_sq: flag for cqp db to ring
1158 */
1159static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
1160 struct i40iw_sc_cqp *cqp,
1161 u64 scratch,
1162 u16 arp_index,
1163 bool post_sq)
1164{
1165 u64 *wqe;
1166 u64 header;
1167
1168 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1169 if (!wqe)
1170 return I40IW_ERR_RING_FULL;
1171
1172 header = arp_index |
1173 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1174 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
1175 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1176
1177 i40iw_insert_wqe_hdr(wqe, header);
1178
1179 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
1180 wqe, I40IW_CQP_WQE_SIZE * 8);
1181
1182 if (post_sq)
1183 i40iw_sc_cqp_post_sq(cqp);
1184 return 0;
1185}
1186
1187/**
1188 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
1189 * @cqp: struct for cqp hw
1190 * @info: info for apbvt entry to add or delete
1191 * @scratch: u64 saved to be used during cqp completion
1192 * @post_sq: flag for cqp db to ring
1193 */
1194static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
1195 struct i40iw_sc_cqp *cqp,
1196 struct i40iw_apbvt_info *info,
1197 u64 scratch,
1198 bool post_sq)
1199{
1200 u64 *wqe;
1201 u64 header;
1202
1203 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1204 if (!wqe)
1205 return I40IW_ERR_RING_FULL;
1206
1207 set_64bit_val(wqe, 16, info->port);
1208
1209 header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
1210 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
1211 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1212
1213 i40iw_insert_wqe_hdr(wqe, header);
1214
1215 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
1216 wqe, I40IW_CQP_WQE_SIZE * 8);
1217
1218 if (post_sq)
1219 i40iw_sc_cqp_post_sq(cqp);
1220 return 0;
1221}
1222
1223/**
1224 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
1225 * @cqp: struct for cqp hw
1226 * @info: info for quad hash to manage
1227 * @scratch: u64 saved to be used during cqp completion
1228 * @post_sq: flag for cqp db to ring
1229 *
1230 * This is called before connection establishment is started. For passive connections, when
1231 * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
1232 * ip address and tcp port. When SYN is received (passive connections) or
1233 * sent (active connections), this routine is called with entry type of
1234 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
1235 *
1236 * When iwarp connection is done and its state moves to RTS, the quad hash entry in
1237 * the hardware will point to iwarp's qp number and requires no calls from the driver.
1238 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
					struct i40iw_sc_cqp *cqp,
					struct i40iw_qhash_table_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;
	struct i40iw_sc_vsi *vsi = info->vsi;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* qw0: 6-byte MAC, mac_addr[0] in the highest used byte */
	temp = info->mac_addr[5] |
		LS_64_1(info->mac_addr[4], 8) |
		LS_64_1(info->mac_addr[3], 16) |
		LS_64_1(info->mac_addr[2], 24) |
		LS_64_1(info->mac_addr[1], 32) |
		LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 0, temp);

	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
	/* destination address: ipv4 uses only the ADDR3 slot at offset 48,
	 * ipv6 spreads its four 32-bit words across offsets 56 and 48
	 */
	if (info->ipv4_valid) {
		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
	} else {
		set_64bit_val(wqe,
			      56,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	/* qw2: qs handle for the user priority's qos node, plus vlan if any */
	qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);
	/* established entries also carry the source half of the quad
	 * (source port and source ip); SYN entries are local-only
	 */
	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe,
				      40,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
		} else {
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
		}
	}

	set_64bit_val(wqe, 8, qw1);
	/* qhash wqes use their own header layout (QHASH_* fields) */
	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

	i40iw_insert_wqe_hdr(wqe, temp);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1320
1321/**
1322 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
1323 * @cqp: struct for cqp hw
1324 * @scratch: u64 saved to be used during cqp completion
1325 * @post_sq: flag for cqp db to ring
1326 */
1327static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
1328 struct i40iw_sc_cqp *cqp,
1329 u64 scratch,
1330 bool post_sq)
1331{
1332 u64 *wqe;
1333 u64 header;
1334
1335 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1336 if (!wqe)
1337 return I40IW_ERR_RING_FULL;
1338 header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
1339 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1340
1341 i40iw_insert_wqe_hdr(wqe, header);
1342 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
1343 wqe, I40IW_CQP_WQE_SIZE * 8);
1344 if (post_sq)
1345 i40iw_sc_cqp_post_sq(cqp);
1346 return 0;
1347}
1348
1349/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
1353 * @scratch: u64 saved to be used during cqp completion
1354 * @post_sq: flag for cqp db to ring
1355 */
1356static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
1357 struct i40iw_sc_cqp *cqp,
1358 struct i40iw_local_mac_ipaddr_entry_info *info,
1359 u64 scratch,
1360 bool post_sq)
1361{
1362 u64 *wqe;
1363 u64 temp, header;
1364
1365 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1366 if (!wqe)
1367 return I40IW_ERR_RING_FULL;
1368 temp = info->mac_addr[5] |
1369 LS_64_1(info->mac_addr[4], 8) |
1370 LS_64_1(info->mac_addr[3], 16) |
1371 LS_64_1(info->mac_addr[2], 24) |
1372 LS_64_1(info->mac_addr[1], 32) |
1373 LS_64_1(info->mac_addr[0], 40);
1374
1375 set_64bit_val(wqe, 32, temp);
1376
1377 header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
1378 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
1379 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1380
1381 i40iw_insert_wqe_hdr(wqe, header);
1382
1383 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
1384 wqe, I40IW_CQP_WQE_SIZE * 8);
1385
1386 if (post_sq)
1387 i40iw_sc_cqp_post_sq(cqp);
1388 return 0;
1389}
1390
1391/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac addr delete
1397 * @post_sq: flag for cqp db to ring
1398 */
1399static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
1400 struct i40iw_sc_cqp *cqp,
1401 u64 scratch,
1402 u8 entry_idx,
1403 u8 ignore_ref_count,
1404 bool post_sq)
1405{
1406 u64 *wqe;
1407 u64 header;
1408
1409 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1410 if (!wqe)
1411 return I40IW_ERR_RING_FULL;
1412 header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
1413 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
1414 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
1415 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
1416 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
1417
1418 i40iw_insert_wqe_hdr(wqe, header);
1419
1420 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
1421 wqe, I40IW_CQP_WQE_SIZE * 8);
1422
1423 if (post_sq)
1424 i40iw_sc_cqp_post_sq(cqp);
1425 return 0;
1426}
1427
1428/**
1429 * i40iw_sc_cqp_nop - send a nop wqe
1430 * @cqp: struct for cqp hw
1431 * @scratch: u64 saved to be used during cqp completion
1432 * @post_sq: flag for cqp db to ring
1433 */
1434static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
1435 u64 scratch,
1436 bool post_sq)
1437{
1438 u64 *wqe;
1439 u64 header;
1440
1441 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1442 if (!wqe)
1443 return I40IW_ERR_RING_FULL;
1444 header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
1445 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1446 i40iw_insert_wqe_hdr(wqe, header);
1447 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
1448 wqe, I40IW_CQP_WQE_SIZE * 8);
1449
1450 if (post_sq)
1451 i40iw_sc_cqp_post_sq(cqp);
1452 return 0;
1453}
1454
1455/**
1456 * i40iw_sc_ceq_init - initialize ceq
1457 * @ceq: ceq sc structure
1458 * @info: ceq initialization info
1459 */
static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
						struct i40iw_ceq_init_info *info)
{
	u32 pble_obj_cnt;

	if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
	    (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
		return I40IW_ERR_INVALID_SIZE;

	if (info->ceq_id >= I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* virtually mapped ceqs index into the pble pool; reject an index
	 * beyond the pool size
	 */
	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	ceq->size = sizeof(*ceq);
	ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
	ceq->ceq_id = info->ceq_id;
	ceq->dev = info->dev;
	ceq->elem_cnt = info->elem_cnt;
	ceq->ceq_elem_pa = info->ceqe_pa;
	ceq->virtual_map = info->virtual_map;

	/* pbl fields are only meaningful when virtually mapped */
	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);

	ceq->tph_en = info->tph_en;
	ceq->tph_val = info->tph_val;
	/* hw writes valid bit = 1 on the first pass through the ring */
	ceq->polarity = 1;
	I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
	/* register this ceq on the device by its id */
	ceq->dev->ceq[info->ceq_id] = ceq;

	return 0;
}
1497
1498/**
1499 * i40iw_sc_ceq_create - create ceq wqe
1500 * @ceq: ceq sc structure
1501 * @scratch: u64 saved to be used during cqp completion
1502 * @post_sq: flag for cqp db to ring
1503 */
static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* qw2: element count; qw4: physical base (0 if virtually mapped);
	 * qw6: first pbl index (0 if physically contiguous); qw7: tph value
	 */
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
	set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1537
1538/**
1539 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
1540 * @ceq: ceq sc structure
1541 */
1542static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
1543{
1544 struct i40iw_sc_cqp *cqp;
1545
1546 cqp = ceq->dev->cqp;
1547 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
1548}
1549
1550/**
1551 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
1552 * @ceq: ceq sc structure
1553 */
1554static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
1555{
1556 struct i40iw_sc_cqp *cqp;
1557
1558 cqp = ceq->dev->cqp;
1559 cqp->process_cqp_sds = i40iw_update_sds_noccq;
1560 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
1561}
1562
1563/**
1564 * i40iw_sc_cceq_create - create cceq
1565 * @ceq: ceq sc structure
1566 * @scratch: u64 saved to be used during cqp completion
1567 */
1568static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
1569{
1570 enum i40iw_status_code ret_code;
1571
1572 ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
1573 if (!ret_code)
1574 ret_code = i40iw_sc_cceq_create_done(ceq);
1575 return ret_code;
1576}
1577
1578/**
1579 * i40iw_sc_ceq_destroy - destroy ceq
1580 * @ceq: ceq sc structure
1581 * @scratch: u64 saved to be used during cqp completion
1582 * @post_sq: flag for cqp db to ring
1583 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* qw2: element count, qw6: first pbl index (mirrors the create wqe) */
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1612
1613/**
1614 * i40iw_sc_process_ceq - process ceq
1615 * @dev: sc device struct
1616 * @ceq: ceq sc structure
1617 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	u64 temp;
	u64 *ceqe;
	struct i40iw_sc_cq *cq = NULL;
	u8 polarity;

	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
	get_64bit_val(ceqe, 0, &temp);
	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
	/* valid bit != expected polarity means no new entry: return NULL */
	if (polarity != ceq->polarity)
		return cq;

	/* the remaining bits of qw0 (shifted up by one) are the cq pointer */
	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
	/* polarity flips each time the ring wraps to index 0 */
	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
		ceq->polarity ^= 1;

	/* ack the cq so hw can generate further completion events for it */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

	return cq;
}
1644
1645/**
1646 * i40iw_sc_aeq_init - initialize aeq
1647 * @aeq: aeq structure ptr
1648 * @info: aeq initialization info
1649 */
1650static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
1651 struct i40iw_aeq_init_info *info)
1652{
1653 u32 pble_obj_cnt;
1654
1655 if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
1656 (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
1657 return I40IW_ERR_INVALID_SIZE;
1658 pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1659
1660 if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1661 return I40IW_ERR_INVALID_PBLE_INDEX;
1662
1663 aeq->size = sizeof(*aeq);
1664 aeq->polarity = 1;
1665 aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
1666 aeq->dev = info->dev;
1667 aeq->elem_cnt = info->elem_cnt;
1668
1669 aeq->aeq_elem_pa = info->aeq_elem_pa;
1670 I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
1671 info->dev->aeq = aeq;
1672
1673 aeq->virtual_map = info->virtual_map;
1674 aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
1675 aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
1676 aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
1677 info->dev->aeq = aeq;
1678 return 0;
1679}
1680
1681/**
1682 * i40iw_sc_aeq_create - create aeq
1683 * @aeq: aeq structure ptr
1684 * @scratch: u64 saved to be used during cqp completion
1685 * @post_sq: flag for cqp db to ring
1686 */
static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
						  u64 scratch,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* qw2: element count; qw4: physical base (0 if virtually mapped);
	 * qw6: first pbl index (0 if physically contiguous)
	 */
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 32,
		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
	set_64bit_val(wqe, 48,
		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

	header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1717
1718/**
1719 * i40iw_sc_aeq_destroy - destroy aeq during close
1720 * @aeq: aeq structure ptr
1721 * @scratch: u64 saved to be used during cqp completion
1722 * @post_sq: flag for cqp db to ring
1723 */
static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
						   u64 scratch,
						   bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* qw2: element count, qw6: first pbl index (mirrors the create wqe) */
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
	header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1750
1751/**
1752 * i40iw_sc_get_next_aeqe - get next aeq entry
1753 * @aeq: aeq structure ptr
1754 * @info: aeqe info to be returned
1755 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
						     struct i40iw_aeqe_info *info)
{
	u64 temp, compl_ctx;
	u64 *aeqe;
	u16 wqe_idx;
	u8 ae_src;
	u8 polarity;

	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
	get_64bit_val(aeqe, 0, &compl_ctx);
	get_64bit_val(aeqe, 8, &temp);
	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

	/* valid bit != expected polarity means there is no new entry */
	if (aeq->polarity != polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

	/* decode the second quad word of the aeqe */
	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);

	/* for these ae codes, classify by the code itself and override
	 * ae_src to RSVD so the source switch below does not reinterpret
	 * the completion context
	 */
	switch (info->ae_id) {
	case I40IW_AE_PRIV_OPERATION_DENIED:
	case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
	case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
	case I40IW_AE_BAD_CLOSE:
	case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
	case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
	case I40IW_AE_STAG_ZERO_INVALID:
	case I40IW_AE_IB_RREQ_AND_Q1_FULL:
	case I40IW_AE_WQE_UNEXPECTED_OPCODE:
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
	case I40IW_AE_DDP_UBE_INVALID_MO:
	case I40IW_AE_DDP_UBE_INVALID_QN:
	case I40IW_AE_DDP_NO_L_BIT:
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
	case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
	case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
	case I40IW_AE_INVALID_ARP_ENTRY:
	case I40IW_AE_INVALID_TCP_OPTION_RCVD:
	case I40IW_AE_STALE_ARP_ENTRY:
	case I40IW_AE_LLP_CLOSE_COMPLETE:
	case I40IW_AE_LLP_CONNECTION_RESET:
	case I40IW_AE_LLP_FIN_RECEIVED:
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
	case I40IW_AE_LLP_SYN_RECEIVED:
	case I40IW_AE_LLP_TERMINATE_RECEIVED:
	case I40IW_AE_LLP_TOO_MANY_RETRIES:
	case I40IW_AE_LLP_DOUBT_REACHABILITY:
	case I40IW_AE_RESET_SENT:
	case I40IW_AE_TERMINATE_SENT:
	case I40IW_AE_RESET_NOT_SENT:
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_QP_SUSPEND_COMPLETE:
		/* qp-scoped event: compl_ctx is the qp context */
		info->qp = true;
		info->compl_ctx = compl_ctx;
		ae_src = I40IW_AE_SOURCE_RSVD;
		break;
	case I40IW_AE_LCE_CQ_CATASTROPHIC:
		/* cq-scoped event: compl_ctx holds the cq pointer >> 1 */
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		ae_src = I40IW_AE_SOURCE_RSVD;
		break;
	}

	/* for remaining codes, the hw-reported source decides how the
	 * completion context and wqe index should be interpreted
	 */
	switch (ae_src) {
	case I40IW_AE_SOURCE_RQ:
	case I40IW_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_CQ:
	case I40IW_AE_SOURCE_CQ_0110:
	case I40IW_AE_SOURCE_CQ_1010:
	case I40IW_AE_SOURCE_CQ_1110:
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case I40IW_AE_SOURCE_SQ:
	case I40IW_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_IN_RR_WR:
	case I40IW_AE_SOURCE_IN_RR_WR_1011:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case I40IW_AE_SOURCE_OUT_RR:
	case I40IW_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	case I40IW_AE_SOURCE_RSVD:
		/* fallthrough */
	default:
		break;
	}
	/* consume the entry; polarity flips each time the ring wraps */
	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
		aeq->polarity ^= 1;
	return 0;
}
1873
1874/**
1875 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1876 * @dev: sc device struct
1877 * @count: allocate count
1878 */
1879static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
1880 u32 count)
1881{
1882 if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
1883 return I40IW_ERR_INVALID_SIZE;
1884
1885 if (dev->is_pf)
1886 i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
1887 else
1888 i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
1889
1890 return 0;
1891}
1892
1893/**
1894 * i40iw_sc_aeq_create_done - create aeq
1895 * @aeq: aeq structure ptr
1896 */
1897static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
1898{
1899 struct i40iw_sc_cqp *cqp;
1900
1901 cqp = aeq->dev->cqp;
1902 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
1903}
1904
1905/**
1906 * i40iw_sc_aeq_destroy_done - destroy of aeq during close
1907 * @aeq: aeq structure ptr
1908 */
1909static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
1910{
1911 struct i40iw_sc_cqp *cqp;
1912
1913 cqp = aeq->dev->cqp;
1914 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
1915}
1916
1917/**
1918 * i40iw_sc_ccq_init - initialize control cq
1919 * @cq: sc's cq ctruct
1920 * @info: info for control cq initialization
1921 */
1922static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
1923 struct i40iw_ccq_init_info *info)
1924{
1925 u32 pble_obj_cnt;
1926
1927 if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
1928 return I40IW_ERR_INVALID_SIZE;
1929
1930 if (info->ceq_id > I40IW_MAX_CEQID)
1931 return I40IW_ERR_INVALID_CEQ_ID;
1932
1933 pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1934
1935 if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1936 return I40IW_ERR_INVALID_PBLE_INDEX;
1937
1938 cq->cq_pa = info->cq_pa;
1939 cq->cq_uk.cq_base = info->cq_base;
1940 cq->shadow_area_pa = info->shadow_area_pa;
1941 cq->cq_uk.shadow_area = info->shadow_area;
1942 cq->shadow_read_threshold = info->shadow_read_threshold;
1943 cq->dev = info->dev;
1944 cq->ceq_id = info->ceq_id;
1945 cq->cq_uk.cq_size = info->num_elem;
1946 cq->cq_type = I40IW_CQ_TYPE_CQP;
1947 cq->ceqe_mask = info->ceqe_mask;
1948 I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
1949
1950 cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
1951 cq->ceq_id_valid = info->ceq_id_valid;
1952 cq->tph_en = info->tph_en;
1953 cq->tph_val = info->tph_val;
1954 cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
1955
1956 cq->pbl_list = info->pbl_list;
1957 cq->virtual_map = info->virtual_map;
1958 cq->pbl_chunk_size = info->pbl_chunk_size;
1959 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
1960 cq->cq_uk.polarity = true;
1961
1962 /* following are only for iw cqs so initialize them to zero */
1963 cq->cq_uk.cqe_alloc_reg = NULL;
1964 info->dev->ccq = cq;
1965 return 0;
1966}
1967
1968/**
1969 * i40iw_sc_ccq_create_done - poll cqp for ccq create
1970 * @ccq: ccq sc struct
1971 */
1972static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
1973{
1974 struct i40iw_sc_cqp *cqp;
1975
1976 cqp = ccq->dev->cqp;
1977 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
1978}
1979
/**
 * i40iw_sc_ccq_create - create control cq
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: overflow flag for ccq
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP "create CQ" WQE for the control CQ and optionally
 * posts it.  When @post_sq is set, the create is also polled to
 * completion before switching the SD update path to the CQP method.
 */
static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
						  u64 scratch,
						  bool check_overflow,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_status_code ret_code;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	/* physical address is only meaningful when not virtually mapped */
	set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
	set_64bit_val(wqe, 48,
		      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56,
		      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header (with valid bit) is written after the rest of the WQE */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_sc_ccq_create_done(ccq);
		if (ret_code)
			return ret_code;
	}
	/* once the ccq exists, SD updates go through the cqp */
	cqp->process_cqp_sds = i40iw_cqp_sds_cmd;

	return 0;
}
2039
/**
 * i40iw_sc_ccq_destroy - destroy ccq during close
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Completion is detected by polling the CQP tail registers rather than
 * the ccq itself, since the ccq is the object being torn down.  On
 * return the SD update path is switched to the no-ccq method.
 */
static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	enum i40iw_status_code ret_code = 0;
	u32 tail, val, error;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* snapshot tail before ringing the doorbell so we can poll for it */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
	}

	cqp->process_cqp_sds = i40iw_update_sds_noccq;

	return ret_code;
}
2091
2092/**
2093 * i40iw_sc_cq_init - initialize completion q
2094 * @cq: cq struct
2095 * @info: cq initialization info
2096 */
2097static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
2098 struct i40iw_cq_init_info *info)
2099{
2100 u32 __iomem *cqe_alloc_reg = NULL;
2101 enum i40iw_status_code ret_code;
2102 u32 pble_obj_cnt;
2103 u32 arm_offset;
2104
2105 pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2106
2107 if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
2108 return I40IW_ERR_INVALID_PBLE_INDEX;
2109
2110 cq->cq_pa = info->cq_base_pa;
2111 cq->dev = info->dev;
2112 cq->ceq_id = info->ceq_id;
2113 arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
2114 if (i40iw_get_hw_addr(cq->dev))
2115 cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
2116 arm_offset);
2117 info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
2118 ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
2119 if (ret_code)
2120 return ret_code;
2121 cq->virtual_map = info->virtual_map;
2122 cq->pbl_chunk_size = info->pbl_chunk_size;
2123 cq->ceqe_mask = info->ceqe_mask;
2124 cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
2125
2126 cq->shadow_area_pa = info->shadow_area_pa;
2127 cq->shadow_read_threshold = info->shadow_read_threshold;
2128
2129 cq->ceq_id_valid = info->ceq_id_valid;
2130 cq->tph_en = info->tph_en;
2131 cq->tph_val = info->tph_val;
2132
2133 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2134
2135 return 0;
2136}
2137
/**
 * i40iw_sc_cq_create - create completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: flag for overflow check
 * @post_sq: flag for cqp db to ring
 *
 * Validates the cq/ceq ids, builds the CQP "create CQ" WQE from the
 * previously initialized @cq, and optionally rings the CQP doorbell.
 */
static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
						 u64 scratch,
						 bool check_overflow,
						 bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
		return I40IW_ERR_INVALID_CQ_ID;

	if (cq->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe,
		      16,
		      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));

	/* pa or pble index depending on mapping mode */
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));

	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2198
/**
 * i40iw_sc_cq_destroy - destroy completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP "destroy CQ" WQE for @cq and optionally posts it.
 * Completion is reported through the ccq like other CQP ops.
 */
static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2242
/**
 * i40iw_sc_cq_modify - modify a Completion Queue
 * @cq: cq struct
 * @info: modification info struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 *
 * Supports resize, ceq re-association and overflow-check changes.  For
 * each attribute the effective value is either taken from @info (when
 * the corresponding change flag is set) or carried over from @cq, then
 * written back into @cq and into the CQP "modify CQ" WQE.
 */
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
						 struct i40iw_modify_cq_info *info,
						 u64 scratch,
						 bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	u32 cq_size, ceq_id, first_pm_pbl_idx;
	u8 pbl_chunk_size;
	bool virtual_map, ceq_id_valid, check_overflow;
	u32 pble_obj_cnt;

	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->cq_resize && info->virtual_map &&
	    (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* NOTE(review): first_pm_pbl_idx is taken from info here and
	 * possibly recomputed below for the non-resize case.
	 */
	cq->pbl_list = info->pbl_list;
	cq->cq_pa = info->cq_pa;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	/* pick new-vs-current value for each modifiable attribute */
	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
	if (info->ceq_change) {
		ceq_id_valid = true;
		ceq_id = info->ceq_id;
	} else {
		ceq_id_valid = cq->ceq_id_valid;
		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
	}
	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
	first_pm_pbl_idx = (info->cq_resize ?
			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	pbl_chunk_size = (info->cq_resize ?
			  (info->virtual_map ? info->pbl_chunk_size : 0) :
			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
	check_overflow = info->check_overflow_change ? info->check_overflow :
			 cq->check_overflow;
	cq->cq_uk.cq_size = cq_size;
	cq->ceq_id_valid = ceq_id_valid;
	cq->ceq_id = ceq_id;
	cq->virtual_map = virtual_map;
	cq->first_pm_pbl_idx = first_pm_pbl_idx;
	cq->pbl_chunk_size = pbl_chunk_size;
	cq->check_overflow = check_overflow;

	set_64bit_val(wqe, 0, cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2337
/**
 * i40iw_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 *
 * Copies the settings from @info into @qp, initializes the user-kernel
 * qp portion, and computes the encoded hardware SQ/RQ sizes.  The RQ
 * wqe size depends on the user-provided ABI version.
 */
static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
					       struct i40iw_qp_init_info *info)
{
	u32 __iomem *wqe_alloc_reg = NULL;
	enum i40iw_status_code ret_code;
	u32 pble_obj_cnt;
	u8 wqe_size;
	u32 offset;

	qp->dev = info->pd->dev;
	qp->vsi = info->vsi;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;

	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	/* wqe-allocate doorbell differs between PF and VF */
	offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
	if (i40iw_get_hw_addr(qp->pd->dev))
		wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						offset);

	info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
	info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
	ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;
	qp->virtual_map = info->virtual_map;

	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* when virtually mapped, sq_pa/rq_pa carry pble indexes — validate them */
	if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
	    (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	qp->llp_stream_handle = (void *)(-1);
	qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;

	qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
		    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);

	/* RQ wqe size: ABI v4 derives it from the fragment count,
	 * ABI v5+ always uses the maximum RQ wqe size
	 */
	switch (qp->pd->abi_ver) {
	case 4:
		ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
						       &wqe_size);
		if (ret_code)
			return ret_code;
		break;
	case 5: /* fallthrough until next ABI version */
	default:
		if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
			return I40IW_ERR_INVALID_FRAG_COUNT;
		wqe_size = I40IW_MAX_WQE_SIZE_RQ;
		break;
	}
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
				(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
		    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;

	return 0;
}
2418
/**
 * i40iw_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Validates the qp id and builds/posts the CQP "create QP" WQE.  The
 * host context must already be populated (see i40iw_sc_qp_setctx).
 */
static enum i40iw_status_code i40iw_sc_qp_create(
				struct i40iw_sc_qp *qp,
				struct i40iw_create_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
	    (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
		return I40IW_ERR_INVALID_QP_ID;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);

	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2468
/**
 * i40iw_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds and (optionally) posts the CQP "modify QP" WQE.  When moving
 * to TERMINATE, encodes which of TERM/FIN to send and the terminate
 * message length.
 */
static enum i40iw_status_code i40iw_sc_qp_modify(
				struct i40iw_sc_qp *qp,
				struct i40iw_modify_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u8 term_actions = 0;
	u8 term_len = 0;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
		/* suppressing FIN leaves TERM only, and vice versa;
		 * when neither is suppressed the sum encodes TERM+FIN
		 */
		if (info->dont_send_fin)
			term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
		if (info->dont_send_term)
			term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
		if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
		    (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
			term_len = info->termlen;
	}

	set_64bit_val(wqe,
		      8,
		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
		 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2534
/**
 * i40iw_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag if to remove hash idx
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 *
 * Removes the qp from its QoS list first, then builds and optionally
 * posts the CQP "destroy QP" WQE.
 */
static enum i40iw_status_code i40iw_sc_qp_destroy(
					struct i40iw_sc_qp *qp,
					u64 scratch,
					bool remove_hash_idx,
					bool ignore_mw_bnd,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	i40iw_qp_rem_qos(qp);
	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
		 LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2577
/**
 * i40iw_sc_qp_flush_wqes - flush qp's wqe
 * @qp: sc qp
 * @info: flush information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP "flush WQEs" WQE.  The qp's flush_sq/flush_rq flags
 * track what has already been flushed so a queue is only flushed once;
 * an MPA CRC error AE is still generated even if both were flushed.
 */
static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
				struct i40iw_sc_qp *qp,
				struct i40iw_qp_flush_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 temp = 0;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	bool flush_sq = false, flush_rq = false;

	if (info->rq && !qp->flush_rq)
		flush_rq = true;

	if (info->sq && !qp->flush_sq)
		flush_sq = true;

	qp->flush_sq |= flush_sq;
	qp->flush_rq |= flush_rq;
	if (!flush_sq && !flush_rq) {
		/* nothing new to flush; only continue to generate the AE
		 * for an MPA CRC error
		 */
		if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
			return 0;
	}

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->userflushcode) {
		if (flush_rq) {
			temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
				LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
		}
		if (flush_sq) {
			temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
				LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
		}
	}
	set_64bit_val(wqe, 16, temp);

	temp = (info->generate_ae) ?
		info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;

	set_64bit_val(wqe, 8, temp);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
		 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
		 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2648
/**
 * i40iw_sc_qp_upload_context - upload qp's context
 * @dev: sc device struct
 * @info: upload context info ptr for return
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Requests the hardware to write the qp's context into the buffer at
 * info->buf_pa, optionally freezing the qp and/or using raw format.
 */
static enum i40iw_status_code i40iw_sc_qp_upload_context(
					struct i40iw_sc_dev *dev,
					struct i40iw_upload_context_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, info->buf_pa);

	header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
		 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
		 LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
		 LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2688
/**
 * i40iw_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 *
 * Populates the QP host context buffer at @qp_ctx.  The second
 * argument to each set_64bit_val() is the byte offset into the
 * context.  The iwarp and tcp sections are only written when the
 * corresponding *_valid flag is set; qw0/qw3/qw7 accumulate bits from
 * several sections and are written last.
 */
static enum i40iw_status_code i40iw_sc_qp_setctx(
				struct i40iw_sc_qp *qp,
				u64 *qp_ctx,
				struct i40iw_qp_host_ctx_info *info)
{
	struct i40iwarp_offload_info *iw;
	struct i40iw_tcp_offload_info *tcp;
	struct i40iw_sc_vsi *vsi;
	struct i40iw_sc_dev *dev;
	u64 qw0, qw3, qw7 = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	vsi = qp->vsi;
	dev = qp->dev;
	if (info->add_to_qoslist) {
		qp->user_pri = info->user_pri;
		i40iw_qp_add_qos(qp);
		i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
			    __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
	}
	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
	      LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
	      LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
	      LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
	      LS_64(info->push_idx, I40IWQPC_PPIDX) |
	      LS_64(info->push_mode_en, I40IWQPC_PMENA);

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
	      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
	      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);

	set_64bit_val(qp_ctx,
		      128,
		      LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));

	set_64bit_val(qp_ctx,
		      136,
		      LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
		      LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx,
		      168,
		      LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
	set_64bit_val(qp_ctx,
		      176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
		      LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));

	/* iwarp (DDP/RDMAP) section */
	if (info->iwarp_info_valid) {
		qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);

		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
		set_64bit_val(qp_ctx,
			      144,
			      LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
			      LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
		set_64bit_val(qp_ctx,
			      152,
			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));

		set_64bit_val(qp_ctx,
			      160,
			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
			      LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
			      LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
			      LS_64(iw->rd_enable, I40IWQPC_RDOK) |
			      LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
			      /* use a stats instance only for PF-allocated non-PF stat ids */
			      LS_64((((vsi->stats_fcn_id_alloc) &&
				      (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
				    I40IWQPC_USESTATSINSTANCE) |
			      LS_64(1, I40IWQPC_IWARPMODE) |
			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
			      LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
			      LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
			      LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
	}
	/* tcp offload section */
	if (info->tcp_info_valid) {
		qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
		       LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
		       LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
		       LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
		       LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
		       LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
		       LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);

		qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
		       LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
		       LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
		       LS_64(tcp->tos, I40IWQPC_TOS) |
		       LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
		       LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);

		qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
		set_64bit_val(qp_ctx,
			      32,
			      LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
			      LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));

		set_64bit_val(qp_ctx,
			      40,
			      LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
			      LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));

		set_64bit_val(qp_ctx,
			      48,
			      LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
			      LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
			      LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));

		qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
		       LS_64(tcp->wscale, I40IWQPC_WSCALE) |
		       LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
		       LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
		       LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
		       LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
		       LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);

		set_64bit_val(qp_ctx,
			      72,
			      LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
			      LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
		set_64bit_val(qp_ctx,
			      80,
			      LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
			      LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));

		set_64bit_val(qp_ctx,
			      88,
			      LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
			      LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
		set_64bit_val(qp_ctx,
			      96,
			      LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
			      LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
		set_64bit_val(qp_ctx,
			      104,
			      LS_64(tcp->srtt, I40IWQPC_SRTT) |
			      LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
		set_64bit_val(qp_ctx,
			      112,
			      LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
			      LS_64(tcp->cwnd, I40IWQPC_CWND));
		set_64bit_val(qp_ctx,
			      120,
			      LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
			      LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
		set_64bit_val(qp_ctx,
			      128,
			      LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
			      LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
		set_64bit_val(qp_ctx,
			      184,
			      LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
			      LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
		set_64bit_val(qp_ctx,
			      192,
			      LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
			      LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
	}

	/* write the accumulated qwords last */
	set_64bit_val(qp_ctx, 0, qw0);
	set_64bit_val(qp_ctx, 24, qw3);
	set_64bit_val(qp_ctx, 56, qw7);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE",
			qp_ctx, I40IW_QP_CTX_SIZE);
	return 0;
}
2876
2877/**
2878 * i40iw_sc_alloc_stag - mr stag alloc
2879 * @dev: sc device struct
2880 * @info: stag info
2881 * @scratch: u64 saved to be used during cqp completion
2882 * @post_sq: flag for cqp db to ring
2883 */
2884static enum i40iw_status_code i40iw_sc_alloc_stag(
2885 struct i40iw_sc_dev *dev,
2886 struct i40iw_allocate_stag_info *info,
2887 u64 scratch,
2888 bool post_sq)
2889{
2890 u64 *wqe;
2891 struct i40iw_sc_cqp *cqp;
2892 u64 header;
Henry Orosco68583ca2016-11-19 20:26:25 -06002893 enum i40iw_page_size page_size;
Faisal Latif86dbcd02016-01-20 13:40:10 -06002894
Henry Orosco68583ca2016-11-19 20:26:25 -06002895 page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
Faisal Latif86dbcd02016-01-20 13:40:10 -06002896 cqp = dev->cqp;
2897 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2898 if (!wqe)
2899 return I40IW_ERR_RING_FULL;
2900 set_64bit_val(wqe,
2901 8,
2902 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
2903 LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
2904 set_64bit_val(wqe,
2905 16,
2906 LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2907 set_64bit_val(wqe,
2908 40,
2909 LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
2910
2911 header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2912 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2913 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2914 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
Henry Orosco68583ca2016-11-19 20:26:25 -06002915 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
Faisal Latif86dbcd02016-01-20 13:40:10 -06002916 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2917 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2918 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2919 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2920
2921 i40iw_insert_wqe_hdr(wqe, header);
2922
2923 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
2924 wqe, I40IW_CQP_WQE_SIZE * 8);
2925
2926 if (post_sq)
2927 i40iw_sc_cqp_post_sq(cqp);
2928 return 0;
2929}
2930
2931/**
2932 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
2933 * @dev: sc device struct
2934 * @info: mr info
2935 * @scratch: u64 saved to be used during cqp completion
2936 * @post_sq: flag for cqp db to ring
2937 */
2938static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
2939 struct i40iw_sc_dev *dev,
2940 struct i40iw_reg_ns_stag_info *info,
2941 u64 scratch,
2942 bool post_sq)
2943{
2944 u64 *wqe;
2945 u64 temp;
2946 struct i40iw_sc_cqp *cqp;
2947 u64 header;
2948 u32 pble_obj_cnt;
2949 bool remote_access;
2950 u8 addr_type;
Henry Orosco68583ca2016-11-19 20:26:25 -06002951 enum i40iw_page_size page_size;
Faisal Latif86dbcd02016-01-20 13:40:10 -06002952
Henry Orosco68583ca2016-11-19 20:26:25 -06002953 page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
Faisal Latif86dbcd02016-01-20 13:40:10 -06002954 if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2955 I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2956 remote_access = true;
2957 else
2958 remote_access = false;
2959
2960 pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2961
2962 if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
2963 return I40IW_ERR_INVALID_PBLE_INDEX;
2964
2965 cqp = dev->cqp;
2966 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2967 if (!wqe)
2968 return I40IW_ERR_RING_FULL;
2969
2970 temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2971 set_64bit_val(wqe, 0, temp);
2972
2973 set_64bit_val(wqe,
2974 8,
2975 LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
2976 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2977
2978 set_64bit_val(wqe,
2979 16,
2980 LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
2981 LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2982 if (!info->chunk_size) {
2983 set_64bit_val(wqe, 32, info->reg_addr_pa);
2984 set_64bit_val(wqe, 48, 0);
2985 } else {
2986 set_64bit_val(wqe, 32, 0);
2987 set_64bit_val(wqe, 48, info->first_pm_pbl_index);
2988 }
2989 set_64bit_val(wqe, 40, info->hmc_fcn_index);
2990 set_64bit_val(wqe, 56, 0);
2991
2992 addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2993 header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
2994 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2995 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
Henry Orosco68583ca2016-11-19 20:26:25 -06002996 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
Faisal Latif86dbcd02016-01-20 13:40:10 -06002997 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2998 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2999 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
3000 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
3001 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
3002 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3003
3004 i40iw_insert_wqe_hdr(wqe, header);
3005
3006 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
3007 wqe, I40IW_CQP_WQE_SIZE * 8);
3008
3009 if (post_sq)
3010 i40iw_sc_cqp_post_sq(cqp);
3011 return 0;
3012}
3013
3014/**
3015 * i40iw_sc_mr_reg_shared - registered shared memory region
3016 * @dev: sc device struct
3017 * @info: info for shared memory registeration
3018 * @scratch: u64 saved to be used during cqp completion
3019 * @post_sq: flag for cqp db to ring
3020 */
3021static enum i40iw_status_code i40iw_sc_mr_reg_shared(
3022 struct i40iw_sc_dev *dev,
3023 struct i40iw_register_shared_stag *info,
3024 u64 scratch,
3025 bool post_sq)
3026{
3027 u64 *wqe;
3028 struct i40iw_sc_cqp *cqp;
3029 u64 temp, va64, fbo, header;
3030 u32 va32;
3031 bool remote_access;
3032 u8 addr_type;
3033
3034 if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
3035 I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
3036 remote_access = true;
3037 else
3038 remote_access = false;
3039 cqp = dev->cqp;
3040 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3041 if (!wqe)
3042 return I40IW_ERR_RING_FULL;
3043 va64 = (uintptr_t)(info->va);
3044 va32 = (u32)(va64 & 0x00000000FFFFFFFF);
3045 fbo = (u64)(va32 & (4096 - 1));
3046
3047 set_64bit_val(wqe,
3048 0,
3049 (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
3050
3051 set_64bit_val(wqe,
3052 8,
3053 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3054 temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
3055 LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
3056 LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
3057 set_64bit_val(wqe, 16, temp);
3058
3059 addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
3060 header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
3061 LS_64(1, I40IW_CQPSQ_STAG_MR) |
3062 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
3063 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
3064 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
3065 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3066
3067 i40iw_insert_wqe_hdr(wqe, header);
3068
3069 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
3070 wqe, I40IW_CQP_WQE_SIZE * 8);
3071
3072 if (post_sq)
3073 i40iw_sc_cqp_post_sq(cqp);
3074 return 0;
3075}
3076
3077/**
3078 * i40iw_sc_dealloc_stag - deallocate stag
3079 * @dev: sc device struct
3080 * @info: dealloc stag info
3081 * @scratch: u64 saved to be used during cqp completion
3082 * @post_sq: flag for cqp db to ring
3083 */
3084static enum i40iw_status_code i40iw_sc_dealloc_stag(
3085 struct i40iw_sc_dev *dev,
3086 struct i40iw_dealloc_stag_info *info,
3087 u64 scratch,
3088 bool post_sq)
3089{
3090 u64 header;
3091 u64 *wqe;
3092 struct i40iw_sc_cqp *cqp;
3093
3094 cqp = dev->cqp;
3095 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3096 if (!wqe)
3097 return I40IW_ERR_RING_FULL;
3098 set_64bit_val(wqe,
3099 8,
3100 LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3101 set_64bit_val(wqe,
3102 16,
3103 LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
3104
3105 header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3106 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
3107 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3108
3109 i40iw_insert_wqe_hdr(wqe, header);
3110
3111 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
3112 wqe, I40IW_CQP_WQE_SIZE * 8);
3113
3114 if (post_sq)
3115 i40iw_sc_cqp_post_sq(cqp);
3116 return 0;
3117}
3118
3119/**
3120 * i40iw_sc_query_stag - query hardware for stag
3121 * @dev: sc device struct
3122 * @scratch: u64 saved to be used during cqp completion
3123 * @stag_index: stag index for query
3124 * @post_sq: flag for cqp db to ring
3125 */
3126static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
3127 u64 scratch,
3128 u32 stag_index,
3129 bool post_sq)
3130{
3131 u64 header;
3132 u64 *wqe;
3133 struct i40iw_sc_cqp *cqp;
3134
3135 cqp = dev->cqp;
3136 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3137 if (!wqe)
3138 return I40IW_ERR_RING_FULL;
3139 set_64bit_val(wqe,
3140 16,
3141 LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
3142
3143 header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
3144 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3145
3146 i40iw_insert_wqe_hdr(wqe, header);
3147
3148 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
3149 wqe, I40IW_CQP_WQE_SIZE * 8);
3150
3151 if (post_sq)
3152 i40iw_sc_cqp_post_sq(cqp);
3153 return 0;
3154}
3155
3156/**
3157 * i40iw_sc_mw_alloc - mw allocate
3158 * @dev: sc device struct
3159 * @scratch: u64 saved to be used during cqp completion
3160 * @mw_stag_index:stag index
3161 * @pd_id: pd is for this mw
3162 * @post_sq: flag for cqp db to ring
3163 */
3164static enum i40iw_status_code i40iw_sc_mw_alloc(
3165 struct i40iw_sc_dev *dev,
3166 u64 scratch,
3167 u32 mw_stag_index,
3168 u16 pd_id,
3169 bool post_sq)
3170{
3171 u64 header;
3172 struct i40iw_sc_cqp *cqp;
3173 u64 *wqe;
3174
3175 cqp = dev->cqp;
3176 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3177 if (!wqe)
3178 return I40IW_ERR_RING_FULL;
3179 set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
3180 set_64bit_val(wqe,
3181 16,
3182 LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
3183
3184 header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3185 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3186
3187 i40iw_insert_wqe_hdr(wqe, header);
3188
3189 i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
3190 wqe, I40IW_CQP_WQE_SIZE * 8);
3191
3192 if (post_sq)
3193 i40iw_sc_cqp_post_sq(cqp);
3194 return 0;
3195}
3196
3197/**
Ismail, Mustafab7aee852016-04-18 10:33:06 -05003198 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
3199 * @qp: sc qp struct
3200 * @info: fast mr info
3201 * @post_sq: flag for cqp db to ring
3202 */
3203enum i40iw_status_code i40iw_sc_mr_fast_register(
3204 struct i40iw_sc_qp *qp,
3205 struct i40iw_fast_reg_stag_info *info,
3206 bool post_sq)
3207{
3208 u64 temp, header;
3209 u64 *wqe;
3210 u32 wqe_idx;
Henry Orosco68583ca2016-11-19 20:26:25 -06003211 enum i40iw_page_size page_size;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05003212
Henry Orosco68583ca2016-11-19 20:26:25 -06003213 page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05003214 wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
3215 0, info->wr_id);
3216 if (!wqe)
3217 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3218
3219 i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
3220 __func__, info->wr_id, wqe_idx,
3221 &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
3222 temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
3223 set_64bit_val(wqe, 0, temp);
3224
3225 temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
3226 set_64bit_val(wqe,
3227 8,
3228 LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
3229 LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
3230
3231 set_64bit_val(wqe,
3232 16,
3233 info->total_len |
3234 LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
3235
3236 header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
3237 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
3238 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
3239 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
Henry Orosco68583ca2016-11-19 20:26:25 -06003240 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
Ismail, Mustafab7aee852016-04-18 10:33:06 -05003241 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
3242 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
3243 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
3244 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
3245 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
3246 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3247
3248 i40iw_insert_wqe_hdr(wqe, header);
3249
3250 i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
3251 wqe, I40IW_QP_WQE_MIN_SIZE);
3252
3253 if (post_sq)
3254 i40iw_qp_post_wr(&qp->qp_uk);
3255 return 0;
3256}
3257
3258/**
Faisal Latif86dbcd02016-01-20 13:40:10 -06003259 * i40iw_sc_send_lsmm - send last streaming mode message
3260 * @qp: sc qp struct
3261 * @lsmm_buf: buffer with lsmm message
3262 * @size: size of lsmm buffer
3263 * @stag: stag of lsmm buffer
3264 */
3265static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
3266 void *lsmm_buf,
3267 u32 size,
3268 i40iw_stag stag)
3269{
3270 u64 *wqe;
3271 u64 header;
3272 struct i40iw_qp_uk *qp_uk;
3273
3274 qp_uk = &qp->qp_uk;
3275 wqe = qp_uk->sq_base->elem;
3276
3277 set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3278
3279 set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3280
3281 set_64bit_val(wqe, 16, 0);
3282
3283 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3284 LS_64(1, I40IWQPSQ_STREAMMODE) |
3285 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3286 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3287
3288 i40iw_insert_wqe_hdr(wqe, header);
3289
3290 i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3291 wqe, I40IW_QP_WQE_MIN_SIZE);
3292}
3293
3294/**
3295 * i40iw_sc_send_lsmm_nostag - for privilege qp
3296 * @qp: sc qp struct
3297 * @lsmm_buf: buffer with lsmm message
3298 * @size: size of lsmm buffer
3299 */
3300static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3301 void *lsmm_buf,
3302 u32 size)
3303{
3304 u64 *wqe;
3305 u64 header;
3306 struct i40iw_qp_uk *qp_uk;
3307
3308 qp_uk = &qp->qp_uk;
3309 wqe = qp_uk->sq_base->elem;
3310
3311 set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3312
3313 set_64bit_val(wqe, 8, size);
3314
3315 set_64bit_val(wqe, 16, 0);
3316
3317 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3318 LS_64(1, I40IWQPSQ_STREAMMODE) |
3319 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3320 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3321
3322 i40iw_insert_wqe_hdr(wqe, header);
3323
3324 i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3325 wqe, I40IW_QP_WQE_MIN_SIZE);
3326}
3327
3328/**
3329 * i40iw_sc_send_rtt - send last read0 or write0
3330 * @qp: sc qp struct
3331 * @read: Do read0 or write0
3332 */
3333static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3334{
3335 u64 *wqe;
3336 u64 header;
3337 struct i40iw_qp_uk *qp_uk;
3338
3339 qp_uk = &qp->qp_uk;
3340 wqe = qp_uk->sq_base->elem;
3341
3342 set_64bit_val(wqe, 0, 0);
3343 set_64bit_val(wqe, 8, 0);
3344 set_64bit_val(wqe, 16, 0);
3345 if (read) {
3346 header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3347 LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3348 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3349 set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3350 } else {
3351 header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3352 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3353 }
3354
3355 i40iw_insert_wqe_hdr(wqe, header);
3356
3357 i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3358 wqe, I40IW_QP_WQE_MIN_SIZE);
3359}
3360
3361/**
3362 * i40iw_sc_post_wqe0 - send wqe with opcode
3363 * @qp: sc qp struct
3364 * @opcode: opcode to use for wqe0
3365 */
3366static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3367{
3368 u64 *wqe;
3369 u64 header;
3370 struct i40iw_qp_uk *qp_uk;
3371
3372 qp_uk = &qp->qp_uk;
3373 wqe = qp_uk->sq_base->elem;
3374
3375 if (!wqe)
3376 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3377 switch (opcode) {
3378 case I40IWQP_OP_NOP:
3379 set_64bit_val(wqe, 0, 0);
3380 set_64bit_val(wqe, 8, 0);
3381 set_64bit_val(wqe, 16, 0);
3382 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3383 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3384
3385 i40iw_insert_wqe_hdr(wqe, header);
3386 break;
3387 case I40IWQP_OP_RDMA_SEND:
3388 set_64bit_val(wqe, 0, 0);
3389 set_64bit_val(wqe, 8, 0);
3390 set_64bit_val(wqe, 16, 0);
3391 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3392 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3393 LS_64(1, I40IWQPSQ_STREAMMODE) |
3394 LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3395
3396 i40iw_insert_wqe_hdr(wqe, header);
3397 break;
3398 default:
3399 i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3400 __func__);
3401 break;
3402 }
3403 return 0;
3404}
3405
/**
 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 *
 * Issues a QUERY FPM operation for the given HMC function (either this
 * function itself or one of its VFs), parses the returned buffer into
 * hmc_info, and for a VF additionally commits the values and allocates
 * the SD entry table.
 */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_dma_mem query_fpm_mem;
	struct i40iw_virt_mem virt_mem;
	struct i40iw_vfdev *vf_dev = NULL;
	u32 mem_size;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u16 iw_vf_idx;
	u8 wait_type;

	/* accept only our own id or an id in the VF FPM range */
	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
		    dev->hmc_fn_id);
	if (hmc_fn_id == dev->hmc_fn_id) {
		/* querying our own function: use the device's query buffer */
		hmc_info = dev->hmc_info;
		query_fpm_mem.pa = dev->fpm_query_buf_pa;
		query_fpm_mem.va = dev->fpm_query_buf;
	} else {
		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
		if (!vf_dev)
			return I40IW_ERR_INVALID_VF_ID;

		hmc_info = &vf_dev->hmc_info;
		iw_vf_idx = vf_dev->iw_vf_idx;
		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
			    hmc_info, hmc_info->hmc_obj);
		/* lazily allocate and cache a query buffer for this VF */
		if (!vf_dev->fpm_query_buf) {
			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
				ret_code = i40iw_alloc_query_fpm_buf(dev,
								     &dev->vf_fpm_query_buf[iw_vf_idx]);
				if (ret_code)
					return ret_code;
			}
			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
		}
		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
		query_fpm_mem.va = vf_dev->fpm_query_buf;
		/**
		 * It is HARDWARE specific:
		 * this call is done by PF for VF and
		 * i40iw_sc_query_fpm_values needs ccq poll
		 * because PF ccq is already created.
		 */
		poll_registers = false;
	}

	hmc_info->hmc_fn_id = hmc_fn_id;

	if (hmc_fn_id != dev->hmc_fn_id) {
		/* VF query goes through the cqp command wrapper */
		ret_code =
			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
	} else {
		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			    (u8)I40IW_CQP_WAIT_POLL_CQ;

		ret_code = i40iw_sc_query_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&query_fpm_mem,
					true,
					wait_type);
	}
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code =
		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
					     hmc_info,
					     &dev->hmc_fpm_misc);
	if (ret_code)
		return ret_code;
	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);

	if (hmc_fn_id != dev->hmc_fn_id) {
		/* VF: commit the values (result reuses the query buffer),
		 * then size and allocate the SD entry table from sd_cnt
		 */
		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);

		/* parse the fpm_commit_buf and fill hmc obj info */
		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
		if (ret_code)
			return ret_code;
		hmc_info->sd_table.sd_entry = virt_mem.va;
	}

	return ret_code;
}
3508
/**
 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
 * populates fpm base address in hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 *
 * Writes the desired object counts into the commit buffer (one 64-bit
 * slot per object type), issues COMMIT FPM VALUES, then parses the
 * response back into hmc_info (object bases and sd_cnt).
 */
static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
							u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_obj_info *obj_info;
	u64 *buf;
	struct i40iw_dma_mem commit_fpm_mem;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u8 wait_type;

	/* accept only our own id or an id in the VF FPM range */
	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
	} else {
		/* VF commit completes through the ccq, not register polling */
		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
		poll_registers = false;
	}
	if (!hmc_info)
		return I40IW_ERR_BAD_PTR;

	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	/* copy cnt values in commit buf; 8 bytes per object type */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
	     i++, j += 8)
		set_64bit_val(buf, j, (u64)obj_info[i].cnt);

	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */

	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;
	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
		    (u8)I40IW_CQP_WAIT_POLL_CQ;
	ret_code = i40iw_sc_commit_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&commit_fpm_mem,
					true,
					wait_type);

	/* parse the fpm_commit_buf and fill hmc obj info */
	if (!ret_code)
		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
							 hmc_info->hmc_obj,
							 &hmc_info->sd_table.sd_cnt);

	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}
3573
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 *
 * Builds an UPDATE_PE_SDS WQE: up to three SD entries are encoded
 * inline in the WQE; any remaining entries are staged in the cqp's
 * sdbuf DMA area whose physical address is placed in the WQE.
 */
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
					       struct i40iw_update_sds_info *info,
					       u64 scratch)
{
	u64 data;
	u64 header;
	u64 *wqe;
	int mem_entries, wqe_entries;
	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	I40IW_CQP_INIT_WQE(wqe);
	/* first 3 entries ride in the wqe itself, the rest in sdbuf */
	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

	if (mem_entries) {
		/* 16 bytes per overflow entry copied to the staging buffer.
		 * NOTE(review): sdbuf is a single per-cqp buffer — looks like
		 * a second in-flight UPDATE_PE_SDS with overflow entries would
		 * overwrite it before the hardware reads the first; confirm
		 * callers serialize these commands.
		 */
		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
		data = sdbuf->pa;
	} else {
		data = 0;
	}
	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);

	set_64bit_val(wqe, 16, data);

	/* cases deliberately fall through: entry N implies entries < N */
	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, 48,
			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 56, info->entry[2].data);
		/* fallthrough */
	case 2:
		set_64bit_val(wqe, 32,
			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 40, info->entry[1].data);
		/* fallthrough */
	case 1:
		set_64bit_val(wqe, 0,
			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));

		set_64bit_val(wqe, 8, info->entry[0].data);
		break;
	default:
		break;
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	return 0;
}
3643
3644/**
3645 * i40iw_update_pe_sds - cqp wqe for sd
3646 * @dev: ptr to i40iw_dev struct
3647 * @info: sd info for sd's
3648 * @scratch: u64 saved to be used during cqp completion
3649 */
3650static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3651 struct i40iw_update_sds_info *info,
3652 u64 scratch)
3653{
3654 struct i40iw_sc_cqp *cqp = dev->cqp;
3655 enum i40iw_status_code ret_code;
3656
3657 ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3658 if (!ret_code)
3659 i40iw_sc_cqp_post_sq(cqp);
3660
3661 return ret_code;
3662}
3663
3664/**
3665 * i40iw_update_sds_noccq - update sd before ccq created
3666 * @dev: sc device struct
3667 * @info: sd info for sd's
3668 */
3669enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3670 struct i40iw_update_sds_info *info)
3671{
3672 u32 error, val, tail;
3673 struct i40iw_sc_cqp *cqp = dev->cqp;
3674 enum i40iw_status_code ret_code;
3675
3676 ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3677 if (ret_code)
3678 return ret_code;
3679 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3680 if (error)
3681 return I40IW_ERR_CQP_COMPL_ERROR;
3682
3683 i40iw_sc_cqp_post_sq(cqp);
3684 ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3685
3686 return ret_code;
3687}
3688
3689/**
3690 * i40iw_sc_suspend_qp - suspend qp for param change
3691 * @cqp: struct for cqp hw
3692 * @qp: sc qp struct
3693 * @scratch: u64 saved to be used during cqp completion
3694 */
3695enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3696 struct i40iw_sc_qp *qp,
3697 u64 scratch)
3698{
3699 u64 header;
3700 u64 *wqe;
3701
3702 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3703 if (!wqe)
3704 return I40IW_ERR_RING_FULL;
3705 header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3706 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3707 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3708
3709 i40iw_insert_wqe_hdr(wqe, header);
3710
3711 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3712 wqe, I40IW_CQP_WQE_SIZE * 8);
3713
3714 i40iw_sc_cqp_post_sq(cqp);
3715 return 0;
3716}
3717
3718/**
3719 * i40iw_sc_resume_qp - resume qp after suspend
3720 * @cqp: struct for cqp hw
3721 * @qp: sc qp struct
3722 * @scratch: u64 saved to be used during cqp completion
3723 */
3724enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3725 struct i40iw_sc_qp *qp,
3726 u64 scratch)
3727{
3728 u64 header;
3729 u64 *wqe;
3730
3731 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3732 if (!wqe)
3733 return I40IW_ERR_RING_FULL;
3734 set_64bit_val(wqe,
3735 16,
3736 LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3737
3738 header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3739 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3740 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3741
3742 i40iw_insert_wqe_hdr(wqe, header);
3743
3744 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3745 wqe, I40IW_CQP_WQE_SIZE * 8);
3746
3747 i40iw_sc_cqp_post_sq(cqp);
3748 return 0;
3749}
3750
3751/**
3752 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3753 * @cqp: struct for cqp hw
3754 * @scratch: u64 saved to be used during cqp completion
3755 * @hmc_fn_id: hmc function id
3756 * @post_sq: flag for cqp db to ring
3757 * @poll_registers: flag to poll register for cqp completion
3758 */
3759enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3760 struct i40iw_sc_cqp *cqp,
3761 u64 scratch,
3762 u8 hmc_fn_id,
3763 bool post_sq,
3764 bool poll_registers)
3765{
3766 u64 header;
3767 u64 *wqe;
3768 u32 tail, val, error;
3769 enum i40iw_status_code ret_code = 0;
3770
3771 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3772 if (!wqe)
3773 return I40IW_ERR_RING_FULL;
3774 set_64bit_val(wqe,
3775 16,
3776 LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3777
3778 header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3779 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3780
3781 i40iw_insert_wqe_hdr(wqe, header);
3782
3783 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3784 wqe, I40IW_CQP_WQE_SIZE * 8);
3785 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3786 if (error) {
3787 ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3788 return ret_code;
3789 }
3790 if (post_sq) {
3791 i40iw_sc_cqp_post_sq(cqp);
3792 if (poll_registers)
3793 /* check for cqp sq tail update */
3794 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3795 else
3796 ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3797 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3798 NULL);
3799 }
3800
3801 return ret_code;
3802}
3803
3804/**
3805 * i40iw_ring_full - check if cqp ring is full
3806 * @cqp: struct for cqp hw
3807 */
3808static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3809{
3810 return I40IW_RING_FULL_ERR(cqp->sq_ring);
3811}
3812
3813/**
Ismail, Mustafafa415372016-04-18 10:33:08 -05003814 * i40iw_est_sd - returns approximate number of SDs for HMC
3815 * @dev: sc device struct
3816 * @hmc_info: hmc structure, size and count for HMC objects
3817 */
3818static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3819{
3820 int i;
3821 u64 size = 0;
3822 u64 sd;
3823
3824 for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3825 size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3826
3827 if (dev->is_pf)
3828 size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3829
3830 if (size & 0x1FFFFF)
3831 sd = (size >> 21) + 1; /* add 1 for remainder */
3832 else
3833 sd = size >> 21;
3834
3835 if (!dev->is_pf) {
3836 /* 2MB alignment for VF PBLE HMC */
3837 size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3838 if (size & 0x1FFFFF)
3839 sd += (size >> 21) + 1; /* add 1 for remainder */
3840 else
3841 sd += size >> 21;
3842 }
3843
3844 return sd;
3845}
3846
/**
 * i40iw_config_fpm_values - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 *
 * Queries firmware for the per-object HMC maxima, then iteratively
 * shrinks the requested QP/MR/PBLE counts until the estimated segment
 * descriptor (SD) demand fits within hmc_fpm_misc->max_sds.  The final
 * counts are committed to hardware via i40iw_sc_configure_iw_fpm() and
 * an sd_entry tracking array is allocated for the resulting SD table.
 *
 * Return: 0 on success, or the error from HMC init, the FPM commit, or
 * the sd_entry buffer allocation.
 */
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
{
	struct i40iw_virt_mem virt_mem;
	u32 i, mem_size;
	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
	u32 powerof2;
	u64 sd_needed;
	u32 loop_count = 0;

	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
	enum i40iw_status_code ret_code = 0;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;

	/* read firmware's per-object maxima into hmc_info */
	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
			    ret_code);
		return ret_code;
	}

	/* start from the firmware maxima for every object type */
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
	sd_needed = i40iw_est_sd(dev, hmc_info);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
		    __func__, sd_needed, hmc_info->first_sd_index);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: sd count %d where max sd is %d\n",
		    __func__, hmc_info->sd_table.sd_cnt,
		    hmc_fpm_misc->max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
	qpwantedoriginal = qpwanted;
	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
		    qp_count, hmc_fpm_misc->max_sds,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);

	/*
	 * Shrink the wanted counts until everything fits in max_sds.
	 * MR and PBLE are reduced every pass; QP only every 10th pass
	 * (and only while above 2/3 of the request), or unconditionally
	 * once 1000 iterations have gone by.
	 */
	do {
		++loop_count;
		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
			qpwanted * hmc_fpm_misc->ht_multiplier;
		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;

		/* dependent objects scale with the current qpwanted */
		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;

		/* How much memory is needed for all the objects. */
		sd_needed = i40iw_est_sd(dev, hmc_info);
		if ((loop_count > 1000) ||
		    ((!(loop_count % 10)) &&
		    (qpwanted > qpwantedoriginal * 2 / 3))) {
			if (qpwanted > FPM_MULTIPLIER) {
				/* step down, then snap to a power of two */
				qpwanted -= FPM_MULTIPLIER;
				powerof2 = 1;
				while (powerof2 < qpwanted)
					powerof2 *= 2;
				powerof2 /= 2;
				qpwanted = powerof2;
			} else {
				qpwanted /= 2;
			}
		}
		if (mrwanted > FPM_MULTIPLIER * 10)
			mrwanted -= FPM_MULTIPLIER * 10;
		if (pblewanted > FPM_MULTIPLIER * 1000)
			pblewanted -= FPM_MULTIPLIER * 1000;
	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);

	/* NOTE(review): loop can exit at 2000 iterations with sd_needed
	 * still above max_sds; the commit below is attempted regardless.
	 */
	sd_needed = i40iw_est_sd(dev, hmc_info);

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);

	/* push the negotiated counts to the hardware FPM */
	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "configure_iw_fpm returned error_code[x%08X]\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	/* allocate the host-side tracking array for the SD table */
	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: failed to allocate memory for sd_entry buffer\n",
			    __func__);
		return ret_code;
	}
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return ret_code;
}
3978
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: rdma device
 * @pcmdinfo: cqp command info
 *
 * Dispatches one queued CQP command to the matching SC-layer function,
 * unpacking the per-command members of pcmdinfo->in.u.  Also bumps the
 * per-command statistics counter.
 *
 * Return: status from the dispatched function, or I40IW_NOT_SUPPORTED
 * for an unrecognized command code.
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
						 struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_dma_mem values_mem;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_del_local_mac_ipaddr_entry(
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_DESTROY:
		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_AEQ_DESTROY:
		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);

		break;
	case OP_DELETE_ARP_CACHE_ENTRY:
		status = i40iw_sc_del_arp_cache_entry(
				pcmdinfo->in.u.del_arp_cache_entry.cqp,
				pcmdinfo->in.u.del_arp_cache_entry.scratch,
				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_APBVT_ENTRY:
		status = i40iw_sc_manage_apbvt_entry(
				pcmdinfo->in.u.manage_apbvt_entry.cqp,
				&pcmdinfo->in.u.manage_apbvt_entry.info,
				pcmdinfo->in.u.manage_apbvt_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_CREATE:
		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_AEQ_CREATE:
		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_add_local_mac_ipaddr_entry(
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_QHASH_TABLE_ENTRY:
		status = i40iw_sc_manage_qhash_table_entry(
				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
				&pcmdinfo->in.u.manage_qhash_table_entry.info,
				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_MODIFY:
		status = i40iw_sc_qp_modify(
				pcmdinfo->in.u.qp_modify.qp,
				&pcmdinfo->in.u.qp_modify.info,
				pcmdinfo->in.u.qp_modify.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_UPLOAD_CONTEXT:
		status = i40iw_sc_qp_upload_context(
				pcmdinfo->in.u.qp_upload_context.dev,
				&pcmdinfo->in.u.qp_upload_context.info,
				pcmdinfo->in.u.qp_upload_context.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_CQ_CREATE:
		status = i40iw_sc_cq_create(
				pcmdinfo->in.u.cq_create.cq,
				pcmdinfo->in.u.cq_create.scratch,
				pcmdinfo->in.u.cq_create.check_overflow,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_DESTROY:
		status = i40iw_sc_cq_destroy(
				pcmdinfo->in.u.cq_destroy.cq,
				pcmdinfo->in.u.cq_destroy.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_CREATE:
		status = i40iw_sc_qp_create(
				pcmdinfo->in.u.qp_create.qp,
				&pcmdinfo->in.u.qp_create.info,
				pcmdinfo->in.u.qp_create.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_DESTROY:
		status = i40iw_sc_qp_destroy(
				pcmdinfo->in.u.qp_destroy.qp,
				pcmdinfo->in.u.qp_destroy.scratch,
				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
				pcmdinfo->in.u.qp_destroy.
				ignore_mw_bnd,
				pcmdinfo->post_sq);

		break;
	case OP_ALLOC_STAG:
		status = i40iw_sc_alloc_stag(
				pcmdinfo->in.u.alloc_stag.dev,
				&pcmdinfo->in.u.alloc_stag.info,
				pcmdinfo->in.u.alloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MR_REG_NON_SHARED:
		status = i40iw_sc_mr_reg_non_shared(
				pcmdinfo->in.u.mr_reg_non_shared.dev,
				&pcmdinfo->in.u.mr_reg_non_shared.info,
				pcmdinfo->in.u.mr_reg_non_shared.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_DEALLOC_STAG:
		status = i40iw_sc_dealloc_stag(
				pcmdinfo->in.u.dealloc_stag.dev,
				&pcmdinfo->in.u.dealloc_stag.info,
				pcmdinfo->in.u.dealloc_stag.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_MW_ALLOC:
		status = i40iw_sc_mw_alloc(
				pcmdinfo->in.u.mw_alloc.dev,
				pcmdinfo->in.u.mw_alloc.scratch,
				pcmdinfo->in.u.mw_alloc.mw_stag_index,
				pcmdinfo->in.u.mw_alloc.pd_id,
				pcmdinfo->post_sq);

		break;
	case OP_QP_FLUSH_WQES:
		status = i40iw_sc_qp_flush_wqes(
				pcmdinfo->in.u.qp_flush_wqes.qp,
				&pcmdinfo->in.u.qp_flush_wqes.info,
				pcmdinfo->in.u.qp_flush_wqes.
				scratch, pcmdinfo->post_sq);
		break;
	case OP_ADD_ARP_CACHE_ENTRY:
		status = i40iw_sc_add_arp_cache_entry(
				pcmdinfo->in.u.add_arp_cache_entry.cqp,
				&pcmdinfo->in.u.add_arp_cache_entry.info,
				pcmdinfo->in.u.add_arp_cache_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_PUSH_PAGE:
		status = i40iw_sc_manage_push_page(
				pcmdinfo->in.u.manage_push_page.cqp,
				&pcmdinfo->in.u.manage_push_page.info,
				pcmdinfo->in.u.manage_push_page.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_UPDATE_PE_SDS:
		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
		status = i40iw_update_pe_sds(
				pcmdinfo->in.u.update_pe_sds.dev,
				&pcmdinfo->in.u.update_pe_sds.info,
				pcmdinfo->in.u.update_pe_sds.
				scratch);

		break;
	case OP_MANAGE_HMC_PM_FUNC_TABLE:
		status = i40iw_sc_manage_hmc_pm_func_table(
				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
				pcmdinfo->in.u.manage_hmc_pm.scratch,
				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
				true);
		break;
	case OP_SUSPEND:
		status = i40iw_sc_suspend_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_RESUME:
		status = i40iw_sc_resume_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_MANAGE_VF_PBLE_BP:
		status = i40iw_manage_vf_pble_bp(
				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
				&pcmdinfo->in.u.manage_vf_pble_bp.info,
				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
		break;
	case OP_QUERY_FPM_VALUES:
		/* pass the FPM buffer by pa/va pair; waits for completion */
		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
		status = i40iw_sc_query_fpm_values(
				pcmdinfo->in.u.query_fpm_values.cqp,
				pcmdinfo->in.u.query_fpm_values.scratch,
				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
				&values_mem, true, I40IW_CQP_WAIT_EVENT);
		break;
	case OP_COMMIT_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
		status = i40iw_sc_commit_fpm_values(
				pcmdinfo->in.u.commit_fpm_values.cqp,
				pcmdinfo->in.u.commit_fpm_values.scratch,
				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
				&values_mem,
				true,
				I40IW_CQP_WAIT_EVENT);
		break;
	default:
		status = I40IW_NOT_SUPPORTED;
		break;
	}

	return status;
}
4218
4219/**
4220 * i40iw_process_cqp_cmd - process all cqp commands
4221 * @dev: sc device struct
4222 * @pcmdinfo: cqp command info
4223 */
4224enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
4225 struct cqp_commands_info *pcmdinfo)
4226{
4227 enum i40iw_status_code status = 0;
Henry Orosco0fc2dc52016-10-10 21:12:10 -05004228 unsigned long flags;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004229
4230 spin_lock_irqsave(&dev->cqp_lock, flags);
4231 if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4232 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4233 else
4234 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
4235 spin_unlock_irqrestore(&dev->cqp_lock, flags);
4236 return status;
4237}
4238
4239/**
4240 * i40iw_process_bh - called from tasklet for cqp list
4241 * @dev: sc device struct
4242 */
4243enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
4244{
4245 enum i40iw_status_code status = 0;
4246 struct cqp_commands_info *pcmdinfo;
Henry Orosco0fc2dc52016-10-10 21:12:10 -05004247 unsigned long flags;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004248
4249 spin_lock_irqsave(&dev->cqp_lock, flags);
4250 while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
4251 pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
4252
4253 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4254 if (status)
4255 break;
4256 }
4257 spin_unlock_irqrestore(&dev->cqp_lock, flags);
4258 return status;
4259}
4260
4261/**
4262 * i40iw_iwarp_opcode - determine if incoming is rdma layer
4263 * @info: aeq info for the packet
4264 * @pkt: packet for error
4265 */
4266static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4267{
Ismail, Mustafa20c61f72016-04-18 10:33:07 -05004268 __be16 *mpa;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004269 u32 opcode = 0xffffffff;
4270
4271 if (info->q2_data_written) {
Ismail, Mustafa20c61f72016-04-18 10:33:07 -05004272 mpa = (__be16 *)pkt;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004273 opcode = ntohs(mpa[1]) & 0xf;
4274 }
4275 return opcode;
4276}
4277
4278/**
4279 * i40iw_locate_mpa - return pointer to mpa in the pkt
4280 * @pkt: packet with data
4281 */
4282static u8 *i40iw_locate_mpa(u8 *pkt)
4283{
4284 /* skip over ethernet header */
4285 pkt += I40IW_MAC_HLEN;
4286
4287 /* Skip over IP and TCP headers */
4288 pkt += 4 * (pkt[0] & 0x0f);
4289 pkt += 4 * ((pkt[12] >> 4) & 0x0f);
4290 return pkt;
4291}
4292
4293/**
4294 * i40iw_setup_termhdr - termhdr for terminate pkt
4295 * @qp: sc qp ptr for pkt
4296 * @hdr: term hdr
4297 * @opcode: flush opcode for termhdr
4298 * @layer_etype: error layer + error type
4299 * @err: error cod ein the header
4300 */
4301static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4302 struct i40iw_terminate_hdr *hdr,
4303 enum i40iw_flush_opcode opcode,
4304 u8 layer_etype,
4305 u8 err)
4306{
4307 qp->flush_code = opcode;
4308 hdr->layer_etype = layer_etype;
4309 hdr->error_code = err;
4310}
4311
/**
 * i40iw_bld_terminate_hdr - build terminate message header
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Builds a terminate header at the start of the QP's q2 buffer.  When
 * the hardware wrote the offending frame into q2, copies the DDP
 * segment length plus the tagged/untagged DDP header (and the RDMA
 * header for a read request) after the terminate header.  Maps the AE
 * code to a flush opcode, layer/etype and error code, and may change
 * qp->eventtype to TERM_EVENT_QP_ACCESS_ERR for access-class errors.
 *
 * Return: total length of the terminate message (header + copied
 * packet bytes).
 */
static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
				   struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	u16 ddp_seg_len;
	int copy_len = 0;
	u8 is_tagged = 0;
	u32 opcode;
	struct i40iw_terminate_hdr *termhdr;

	termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);

	if (info->q2_data_written) {
		/* Use data from offending packet to fill in ddp & rdma hdrs */
		pkt = i40iw_locate_mpa(pkt);
		ddp_seg_len = ntohs(*(__be16 *)pkt);
		if (ddp_seg_len) {
			/* always echo the 2-byte DDP segment length */
			copy_len = 2;
			termhdr->hdrct = DDP_LEN_FLAG;
			if (pkt[2] & 0x80) {
				/* T bit set: tagged buffer model */
				is_tagged = 1;
				if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
					copy_len += TERM_DDP_LEN_TAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}
			} else {
				if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
					copy_len += TERM_DDP_LEN_UNTAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}

				/* include RDMA header only for a read request */
				if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
						copy_len += TERM_RDMA_LEN;
						termhdr->hdrct |= RDMA_HDR_FLAG;
					}
				}
			}
		}
	}

	opcode = i40iw_iwarp_opcode(info, pkt);

	/* map the async event to flush opcode + terminate layer/etype/error */
	switch (info->ae_id) {
	case I40IW_AE_AMP_UNALLOCATED_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BOUNDS_VIOLATION:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (info->q2_data_written)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
		break;
	case I40IW_AE_AMP_BAD_PD:
		switch (opcode) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
			break;
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
		}
		break;
	case I40IW_AE_AMP_INVALID_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BAD_QP:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_AMP_BAD_STAG_KEY:
	case I40IW_AE_AMP_BAD_STAG_INDEX:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		switch (opcode) {
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
		}
		break;
	case I40IW_AE_AMP_RIGHTS_VIOLATION:
	case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case I40IW_AE_PRIV_OPERATION_DENIED:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
		break;
	case I40IW_AE_AMP_TO_WRAP:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
		break;
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
		break;
	case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_DDP_NO_L_BIT:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
		break;
	case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
		break;
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		if (is_tagged)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MO:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
		break;
	case I40IW_AE_DDP_UBE_INVALID_QN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
		break;
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
		break;
	default:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
		break;
	}

	/* append the echoed packet bytes right after the terminate header */
	if (copy_len)
		memcpy(termhdr + 1, pkt, copy_len);

	return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
4493
4494/**
4495 * i40iw_terminate_send_fin() - Send fin for terminate message
4496 * @qp: qp associated with received terminate AE
4497 */
4498void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4499{
4500 /* Send the fin only */
4501 i40iw_term_modify_qp(qp,
4502 I40IW_QP_STATE_TERMINATE,
4503 I40IWQP_TERM_SEND_FIN_ONLY,
4504 0);
4505}
4506
4507/**
4508 * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
4509 * @qp: qp associated with received terminate AE
4510 * @info: the struct contiaing AE information
4511 */
4512void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4513{
4514 u8 termlen = 0;
4515
4516 if (qp->term_flags & I40IW_TERM_SENT)
4517 return; /* Sanity check */
4518
4519 /* Eventtype can change from bld_terminate_hdr */
4520 qp->eventtype = TERM_EVENT_QP_FATAL;
4521 termlen = i40iw_bld_terminate_hdr(qp, info);
4522 i40iw_terminate_start_timer(qp);
4523 qp->term_flags |= I40IW_TERM_SENT;
4524 i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
4525 I40IWQP_TERM_SEND_TERM_ONLY, termlen);
4526}
4527
/**
 * i40iw_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Validates the received terminate frame (when the hardware wrote it to
 * q2) and, if malformed, answers with our own terminate.  For a valid
 * terminate: marks the QP, and either completes the teardown at once
 * (remote protocol/operation errors) or starts the timer and sends FIN.
 */
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	__be32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct i40iw_terminate_hdr *termhdr;

	mpa = (__be32 *)i40iw_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		/* checks in priority order; first failure wins */
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;

		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			i40iw_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= I40IW_TERM_RCVD;
	qp->eventtype = TERM_EVENT_QP_FATAL;
	/* terminate header follows the five 32-bit MPA/DDP/RDMA words */
	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		i40iw_terminate_done(qp, 0);
	} else {
		i40iw_terminate_start_timer(qp);
		i40iw_terminate_send_fin(qp);
	}
}
4579
4580/**
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004581 * i40iw_sc_vsi_init - Initialize virtual device
4582 * @vsi: pointer to the vsi structure
4583 * @info: parameters to initialize vsi
4584 **/
4585void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
4586{
4587 int i;
4588
4589 vsi->dev = info->dev;
4590 vsi->back_vsi = info->back_vsi;
Shiraz Saleem343d86b2017-10-16 15:45:59 -05004591 vsi->mtu = info->params->mtu;
Mustafa Ismail66f49f82017-10-16 15:45:57 -05004592 vsi->exception_lan_queue = info->exception_lan_queue;
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004593 i40iw_fill_qos_list(info->params->qs_handle_list);
4594
4595 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
Dan Carpenter820cd302017-01-09 23:12:16 +03004596 vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
4597 i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
4598 vsi->qos[i].qs_handle);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004599 spin_lock_init(&vsi->qos[i].lock);
4600 INIT_LIST_HEAD(&vsi->qos[i].qplist);
4601 }
4602}
4603
4604/**
4605 * i40iw_hw_stats_init - Initiliaze HW stats table
4606 * @stats: pestat struct
Faisal Latif86dbcd02016-01-20 13:40:10 -06004607 * @fcn_idx: PCI fn id
Faisal Latif86dbcd02016-01-20 13:40:10 -06004608 * @is_pf: Is it a PF?
4609 *
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004610 * Populate the HW stats table with register offset addr for each
4611 * stats. And start the perioidic stats timer.
Faisal Latif86dbcd02016-01-20 13:40:10 -06004612 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004613void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
Faisal Latif86dbcd02016-01-20 13:40:10 -06004614{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004615 u32 stats_reg_offset;
4616 u32 stats_index;
4617 struct i40iw_dev_hw_stats_offsets *stats_table =
4618 &stats->hw_stats_offsets;
4619 struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004620
4621 if (is_pf) {
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004622 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004623 I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004624 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004625 I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004626 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004627 I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004628 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004629 I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004630 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004631 I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004632 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004633 I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004634 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004635 I40E_GLPES_PFTCPRTXSEG(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004636 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004637 I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004638 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004639 I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4640
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004641 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004642 I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004643 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004644 I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004645 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004646 I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004647 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004648 I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004649 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004650 I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004651 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004652 I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004653 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004654 I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004655 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004656 I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004657 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004658 I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004659 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004660 I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004661 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004662 I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004663 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004664 I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004665 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004666 I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004667 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004668 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004669 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004670 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004671 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004672 I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004673 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004674 I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004675 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004676 I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004677 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004678 I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004679 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004680 I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004681 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004682 I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004683 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004684 I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004685 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004686 I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004687 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004688 I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004689 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004690 I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004691 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004692 I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4693 } else {
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004694 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004695 I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004696 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004697 I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004698 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004699 I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004700 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004701 I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004702 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004703 I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004704 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004705 I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004706 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004707 I40E_GLPES_VFTCPRTXSEG(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004708 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004709 I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004710 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004711 I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4712
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004713 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004714 I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004715 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004716 I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004717 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004718 I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004719 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004720 I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004721 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004722 I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004723 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004724 I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004725 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004726 I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004727 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004728 I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004729 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004730 I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004731 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004732 I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004733 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004734 I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004735 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004736 I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004737 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004738 I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004739 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004740 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004741 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004742 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004743 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004744 I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004745 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004746 I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004747 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004748 I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004749 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004750 I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004751 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004752 I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004753 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004754 I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004755 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004756 I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004757 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004758 I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004759 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004760 I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004761 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004762 I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004763 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
Faisal Latif86dbcd02016-01-20 13:40:10 -06004764 I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4765 }
4766
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004767 for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4768 stats_index++) {
4769 stats_reg_offset = stats_table->stats_offset_64[stats_index];
4770 last_rd_stats->stats_value_64[stats_index] =
4771 readq(stats->hw->hw_addr + stats_reg_offset);
Faisal Latif86dbcd02016-01-20 13:40:10 -06004772 }
4773
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004774 for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4775 stats_index++) {
4776 stats_reg_offset = stats_table->stats_offset_32[stats_index];
4777 last_rd_stats->stats_value_32[stats_index] =
4778 i40iw_rd32(stats->hw, stats_reg_offset);
Faisal Latif86dbcd02016-01-20 13:40:10 -06004779 }
4780}
4781
4782/**
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004783 * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
4784 * @stat: pestat struct
4785 * @index: index in HW stats table which contains offset reg-addr
4786 * @value: hw stats value
Faisal Latif86dbcd02016-01-20 13:40:10 -06004787 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004788void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
4789 enum i40iw_hw_stats_index_32b index,
4790 u64 *value)
Faisal Latif86dbcd02016-01-20 13:40:10 -06004791{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004792 struct i40iw_dev_hw_stats_offsets *stats_table =
4793 &stats->hw_stats_offsets;
4794 struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4795 struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4796 u64 new_stats_value = 0;
4797 u32 stats_reg_offset = stats_table->stats_offset_32[index];
Faisal Latif86dbcd02016-01-20 13:40:10 -06004798
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004799 new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
Faisal Latif86dbcd02016-01-20 13:40:10 -06004800 /*roll-over case */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004801 if (new_stats_value < last_rd_stats->stats_value_32[index])
4802 hw_stats->stats_value_32[index] += new_stats_value;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004803 else
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004804 hw_stats->stats_value_32[index] +=
4805 new_stats_value - last_rd_stats->stats_value_32[index];
4806 last_rd_stats->stats_value_32[index] = new_stats_value;
4807 *value = hw_stats->stats_value_32[index];
Faisal Latif86dbcd02016-01-20 13:40:10 -06004808}
4809
4810/**
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004811 * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
4812 * @stats: pestat struct
4813 * @index: index in HW stats table which contains offset reg-addr
4814 * @value: hw stats value
Faisal Latif86dbcd02016-01-20 13:40:10 -06004815 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004816void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
4817 enum i40iw_hw_stats_index_64b index,
4818 u64 *value)
Faisal Latif86dbcd02016-01-20 13:40:10 -06004819{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004820 struct i40iw_dev_hw_stats_offsets *stats_table =
4821 &stats->hw_stats_offsets;
4822 struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4823 struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4824 u64 new_stats_value = 0;
4825 u32 stats_reg_offset = stats_table->stats_offset_64[index];
Faisal Latif86dbcd02016-01-20 13:40:10 -06004826
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004827 new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
Faisal Latif86dbcd02016-01-20 13:40:10 -06004828 /*roll-over case */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004829 if (new_stats_value < last_rd_stats->stats_value_64[index])
4830 hw_stats->stats_value_64[index] += new_stats_value;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004831 else
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004832 hw_stats->stats_value_64[index] +=
4833 new_stats_value - last_rd_stats->stats_value_64[index];
4834 last_rd_stats->stats_value_64[index] = new_stats_value;
4835 *value = hw_stats->stats_value_64[index];
Faisal Latif86dbcd02016-01-20 13:40:10 -06004836}
4837
4838/**
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004839 * i40iw_hw_stats_read_all - read all HW stat counters
4840 * @stats: pestat struct
4841 * @stats_values: hw stats structure
Faisal Latif86dbcd02016-01-20 13:40:10 -06004842 *
4843 * Read all the HW stat counters and populates hw_stats structure
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004844 * of passed-in vsi's pestat as well as copy created in stat_values.
Faisal Latif86dbcd02016-01-20 13:40:10 -06004845 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004846void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
4847 struct i40iw_dev_hw_stats *stats_values)
Faisal Latif86dbcd02016-01-20 13:40:10 -06004848{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004849 u32 stats_index;
4850 unsigned long flags;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004851
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004852 spin_lock_irqsave(&stats->lock, flags);
4853
4854 for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4855 stats_index++)
4856 i40iw_hw_stats_read_32(stats, stats_index,
4857 &stats_values->stats_value_32[stats_index]);
4858 for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4859 stats_index++)
4860 i40iw_hw_stats_read_64(stats, stats_index,
4861 &stats_values->stats_value_64[stats_index]);
4862 spin_unlock_irqrestore(&stats->lock, flags);
Faisal Latif86dbcd02016-01-20 13:40:10 -06004863}
4864
4865/**
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004866 * i40iw_hw_stats_refresh_all - Update all HW stats structs
4867 * @stats: pestat struct
Faisal Latif86dbcd02016-01-20 13:40:10 -06004868 *
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004869 * Read all the HW stats counters to refresh values in hw_stats structure
Faisal Latif86dbcd02016-01-20 13:40:10 -06004870 * of passed-in dev's pestat
4871 */
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004872void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
Faisal Latif86dbcd02016-01-20 13:40:10 -06004873{
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004874 u64 stats_value;
4875 u32 stats_index;
4876 unsigned long flags;
Faisal Latif86dbcd02016-01-20 13:40:10 -06004877
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004878 spin_lock_irqsave(&stats->lock, flags);
4879
4880 for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4881 stats_index++)
4882 i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
4883 for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4884 stats_index++)
4885 i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
4886 spin_unlock_irqrestore(&stats->lock, flags);
4887}
4888
4889/**
4890 * i40iw_get_fcn_id - Return the function id
4891 * @dev: pointer to the device
4892 */
4893static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
4894{
4895 u8 fcn_id = I40IW_INVALID_FCN_ID;
4896 u8 i;
4897
4898 for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
4899 if (!dev->fcn_id_array[i]) {
4900 fcn_id = i;
4901 dev->fcn_id_array[i] = true;
4902 break;
4903 }
4904 return fcn_id;
4905}
4906
4907/**
4908 * i40iw_vsi_stats_init - Initialize the vsi statistics
4909 * @vsi: pointer to the vsi structure
4910 * @info: The info structure used for initialization
4911 */
4912enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
4913{
4914 u8 fcn_id = info->fcn_id;
4915
4916 if (info->alloc_fcn_id)
4917 fcn_id = i40iw_get_fcn_id(vsi->dev);
4918
4919 if (fcn_id == I40IW_INVALID_FCN_ID)
4920 return I40IW_ERR_NOT_READY;
4921
4922 vsi->pestat = info->pestat;
4923 vsi->pestat->hw = vsi->dev->hw;
Kees Cook605cbb22017-10-04 17:45:41 -07004924 vsi->pestat->vsi = vsi;
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004925
4926 if (info->stats_initialize) {
4927 i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
4928 spin_lock_init(&vsi->pestat->lock);
4929 i40iw_hw_stats_start_timer(vsi);
4930 }
4931 vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
4932 vsi->fcn_id = fcn_id;
4933 return I40IW_SUCCESS;
4934}
4935
4936/**
4937 * i40iw_vsi_stats_free - Free the vsi stats
4938 * @vsi: pointer to the vsi structure
4939 */
4940void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
4941{
4942 u8 fcn_id = vsi->fcn_id;
4943
Christopher N Bednarzaa939c12017-08-08 20:38:48 -05004944 if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06004945 vsi->dev->fcn_id_array[fcn_id] = false;
4946 i40iw_hw_stats_stop_timer(vsi);
Faisal Latif86dbcd02016-01-20 13:40:10 -06004947}
4948
/* control QP (CQP) lifecycle and work-request ops */
static struct i40iw_cqp_ops iw_cqp_ops = {
	.cqp_init = i40iw_sc_cqp_init,
	.cqp_create = i40iw_sc_cqp_create,
	.cqp_post_sq = i40iw_sc_cqp_post_sq,
	.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
	.cqp_destroy = i40iw_sc_cqp_destroy,
	.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
};
4957
/* control CQ (CCQ) ops: completion queue used by the CQP */
static struct i40iw_ccq_ops iw_ccq_ops = {
	.ccq_init = i40iw_sc_ccq_init,
	.ccq_create = i40iw_sc_ccq_create,
	.ccq_destroy = i40iw_sc_ccq_destroy,
	.ccq_create_done = i40iw_sc_ccq_create_done,
	.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
	.ccq_arm = i40iw_sc_ccq_arm
};
4966
/* completion event queue (CEQ) ops */
static struct i40iw_ceq_ops iw_ceq_ops = {
	.ceq_init = i40iw_sc_ceq_init,
	.ceq_create = i40iw_sc_ceq_create,
	.cceq_create_done = i40iw_sc_cceq_create_done,
	.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
	.cceq_create = i40iw_sc_cceq_create,
	.ceq_destroy = i40iw_sc_ceq_destroy,
	.process_ceq = i40iw_sc_process_ceq
};
4976
/* asynchronous event queue (AEQ) ops */
static struct i40iw_aeq_ops iw_aeq_ops = {
	.aeq_init = i40iw_sc_aeq_init,
	.aeq_create = i40iw_sc_aeq_create,
	.aeq_destroy = i40iw_sc_aeq_destroy,
	.get_next_aeqe = i40iw_sc_get_next_aeqe,
	.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
	.aeq_create_done = i40iw_sc_aeq_create_done,
	.aeq_destroy_done = i40iw_sc_aeq_destroy_done
};
4986
/* iwarp protection domain (pd) ops */
static struct i40iw_pd_ops iw_pd_ops = {
	.pd_init = i40iw_sc_pd_init,
};
4991
/* privileged QP ops: QP lifecycle, context setup and special WQEs */
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	.qp_init = i40iw_sc_qp_init,
	.qp_create = i40iw_sc_qp_create,
	.qp_modify = i40iw_sc_qp_modify,
	.qp_destroy = i40iw_sc_qp_destroy,
	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
	.qp_upload_context = i40iw_sc_qp_upload_context,
	.qp_setctx = i40iw_sc_qp_setctx,
	.qp_send_lsmm = i40iw_sc_send_lsmm,
	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
	.qp_send_rtt = i40iw_sc_send_rtt,
	.qp_post_wqe0 = i40iw_sc_post_wqe0,
	.iw_mr_fast_register = i40iw_sc_mr_fast_register
};
5006
/* privileged CQ ops */
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	.cq_init = i40iw_sc_cq_init,
	.cq_create = i40iw_sc_cq_create,
	.cq_destroy = i40iw_sc_cq_destroy,
	.cq_modify = i40iw_sc_cq_modify,
};
5013
/* memory region / memory window (stag) ops */
static struct i40iw_mr_ops iw_mr_ops = {
	.alloc_stag = i40iw_sc_alloc_stag,
	.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
	.mr_reg_shared = i40iw_sc_mr_reg_shared,
	.dealloc_stag = i40iw_sc_dealloc_stag,
	.query_stag = i40iw_sc_query_stag,
	.mw_alloc = i40iw_sc_mw_alloc
};
5022
/* miscellaneous CQP commands: HMC/FPM, ARP cache, APBVT, qhash, MAC/IP */
static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	.manage_push_page = i40iw_sc_manage_push_page,
	.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
	.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
	.commit_fpm_values = i40iw_sc_commit_fpm_values,
	.query_fpm_values = i40iw_sc_query_fpm_values,
	.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
	.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
	.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
	.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
	.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
	.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
	.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
	.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
	.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
	.cqp_nop = i40iw_sc_cqp_nop,
	.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
	.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
	.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
	.update_suspend_qp = i40iw_sc_suspend_qp,
	.update_resume_qp = i40iw_sc_resume_qp
};
5045
/* host memory cache (HMC) object and FPM management ops */
static struct i40iw_hmc_ops iw_hmc_ops = {
	.init_iw_hmc = i40iw_sc_init_iw_hmc,
	.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
	.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
	.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
	.create_hmc_object = i40iw_sc_create_hmc_obj,
	.del_hmc_object = i40iw_sc_del_hmc_obj
};
5054
Faisal Latif86dbcd02016-01-20 13:40:10 -06005055/**
5056 * i40iw_device_init - Initialize IWARP device
5057 * @dev: IWARP device pointer
5058 * @info: IWARP init info
5059 */
5060enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
5061 struct i40iw_device_init_info *info)
5062{
5063 u32 val;
5064 u32 vchnl_ver = 0;
5065 u16 hmc_fcn = 0;
5066 enum i40iw_status_code ret_code = 0;
5067 u8 db_size;
5068
5069 spin_lock_init(&dev->cqp_lock);
5070 INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
5071
5072 i40iw_device_init_uk(&dev->dev_uk);
5073
5074 dev->debug_mask = info->debug_mask;
5075
Faisal Latif86dbcd02016-01-20 13:40:10 -06005076 dev->hmc_fn_id = info->hmc_fn_id;
Faisal Latif86dbcd02016-01-20 13:40:10 -06005077 dev->is_pf = info->is_pf;
5078
5079 dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5080 dev->fpm_query_buf = info->fpm_query_buf;
5081
5082 dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5083 dev->fpm_commit_buf = info->fpm_commit_buf;
5084
5085 dev->hw = info->hw;
5086 dev->hw->hw_addr = info->bar0;
5087
Faisal Latif86dbcd02016-01-20 13:40:10 -06005088 if (dev->is_pf) {
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06005089 val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
5090 dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
5091
Faisal Latif86dbcd02016-01-20 13:40:10 -06005092 val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
5093 db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
5094 if ((db_size != I40IW_PE_DB_SIZE_4M) &&
5095 (db_size != I40IW_PE_DB_SIZE_8M)) {
5096 i40iw_debug(dev, I40IW_DEBUG_DEV,
5097 "%s: PE doorbell is not enabled in CSR val 0x%x\n",
5098 __func__, val);
5099 ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
5100 return ret_code;
5101 }
5102 dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
5103 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
5104 } else {
5105 dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
5106 }
5107
5108 dev->cqp_ops = &iw_cqp_ops;
5109 dev->ccq_ops = &iw_ccq_ops;
5110 dev->ceq_ops = &iw_ceq_ops;
5111 dev->aeq_ops = &iw_aeq_ops;
5112 dev->cqp_misc_ops = &iw_cqp_misc_ops;
5113 dev->iw_pd_ops = &iw_pd_ops;
5114 dev->iw_priv_qp_ops = &iw_priv_qp_ops;
5115 dev->iw_priv_cq_ops = &iw_priv_cq_ops;
5116 dev->mr_ops = &iw_mr_ops;
5117 dev->hmc_ops = &iw_hmc_ops;
5118 dev->vchnl_if.vchnl_send = info->vchnl_send;
5119 if (dev->vchnl_if.vchnl_send)
5120 dev->vchnl_up = true;
5121 else
5122 dev->vchnl_up = false;
5123 if (!dev->is_pf) {
5124 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
5125 ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
5126 if (!ret_code) {
5127 i40iw_debug(dev, I40IW_DEBUG_DEV,
5128 "%s: Get Channel version rc = 0x%0x, version is %u\n",
5129 __func__, ret_code, vchnl_ver);
5130 ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
5131 if (!ret_code) {
5132 i40iw_debug(dev, I40IW_DEBUG_DEV,
5133 "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
5134 __func__, ret_code, hmc_fcn);
5135 dev->hmc_fn_id = (u8)hmc_fcn;
5136 }
5137 }
5138 }
5139 dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
5140
5141 return ret_code;
5142}