/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS	256

/* Timers constants */
#define TM_SHIFT		7
#define TM_ALIGN		BIT(TM_SHIFT)
#define TM_ELEM_SIZE		4

#define ILT_DEFAULT_HW_P_SIZE	4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4

/* connection context union */
union conn_context {
	struct e4_core_conn_context core_ctx;
	struct e4_eth_conn_context eth_ctx;
	struct e4_iscsi_conn_context iscsi_ctx;
	struct e4_fcoe_conn_context fcoe_ctx;
	struct e4_roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
	struct e4_iscsi_task_context iscsi_ctx;
	struct e4_fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
	struct e4_rdma_task_context roce_ctx;
};

struct src_ent {
	u8 opaque[56];
	u64 next;
};

#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

/* PF per protocol configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct qed_tid_seg {
	u32 count;
	u8 type;
	bool has_fl_mem;
};

struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cids_per_vf;
	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)

enum ilt_clients {
	ILT_CLI_CDUC,
	ILT_CLI_CDUT,
	ILT_CLI_QM,
	ILT_CLI_TM,
	ILT_CLI_SRC,
	ILT_CLI_TSDM,
	ILT_CLI_MAX
};

struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
	u32 dynamic_line_cnt;
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};

/* Per Path -
 * ILT shadow table
 * Protocol acquired CID lists
 * PF start line in ILT
 */
struct qed_dma_mem {
	dma_addr_t p_phys;
	void *p_virt;
	size_t size;
};

struct qed_cid_acquired_map {
	u32 start_cid;
	u32 max_count;
	unsigned long *cid_map;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg clients[ILT_CLI_MAX];

	/* Task type sizes */
	u32 task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;

	/* Acquired CIDs */
	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];

	struct qed_cid_acquired_map
	    acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];

	/* ILT shadow table */
	struct qed_dma_mem *ilt_shadow;
	u32 pf_start_line;

	/* Mutex for a dynamic ILT allocation */
	struct mutex mutex;

	/* SRC T2 */
	struct qed_dma_mem *t2;
	u32 t2_num_pages;
	u64 first_free;
	u64 last_free;

	/* total number of SRQ's for this hwfn */
	u32 srq_count;

	/* Maximal number of L2 steering filters */
	u32 arfs_count;
};
static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}

	/* Add L2 filtering filters in addition */
	iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
	u32 pf_cids;
	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
	u32 pf_tids_total;
	u32 per_vf_cids;
	u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_cxt_mngr *p_mngr,
			    struct qed_tm_iids *iids)
{
	bool tm_vf_required = false;
	bool tm_required = false;
	int i, j;

	/* Timers is a special case -> we don't count how many cids require
	 * timers but what's the max cid that will be used by the timer block.
	 * Therefore we traverse in reverse order, and once we hit a protocol
	 * that requires the timers memory, we'll sum all the protocols up
	 * to that one.
	 */
	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

		if (tm_cid_proto(i) || tm_required) {
			if (p_cfg->cid_count)
				tm_required = true;

			iids->pf_cids += p_cfg->cid_count;
		}

		if (tm_cid_proto(i) || tm_vf_required) {
			if (p_cfg->cids_per_vf)
				tm_vf_required = true;

			iids->per_vf_cids += p_cfg->cids_per_vf;
		}

		if (tm_tid_proto(i)) {
			struct qed_tid_seg *segs = p_cfg->tid_seg;

			/* for each segment there is at most one
			 * protocol for which count is not 0.
			 */
			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
				iids->pf_tids[j] += segs[j].count;

			/* The last array element is for the VFs. As for PF
			 * segments there can be only one protocol for
			 * which this value is not 0.
			 */
			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
		}
	}

	iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
	iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
	iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
		iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
		iids->pf_tids_total += iids->pf_tids[j];
	}
}

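/* Counts the CIDs and TIDs used by the QM client: PF connections and tasks
 * of every protocol, plus the per-VF counts multiplied by the VF count.
 */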
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As for PF
		 * segments there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids += vf_cids * p_mngr->vf_count;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
						u32 seg)
{
	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++)
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];
	return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
}

u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	return p_mgr->srq_count;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

		p_conn->cid_count = roundup(p_conn->cid_count, align);
	}
}

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type proto,
					u8 seg,
					u8 seg_type, u32 count, bool has_fl)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size, u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}

static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
					enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct qed_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
		    (u32) CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}

static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
						  *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

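/* Compute the ILT layout for this PF: walk every ILT client (CDUC, CDUT,
 * QM, SRC, TM, TSDM), size its PF and VF blocks and assign ILT lines to
 * them, returning the total number of lines used in @line_count.
 */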
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_src_iids src_iids;
	struct qed_qm_iids qm_iids;
	struct qed_tm_iids tm_iids;
	struct qed_tid_seg *p_seg;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));
	memset(&src_iids, 0, sizeof(src_iids));
	memset(&tm_iids, 0, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
							       ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk =
		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1. The total-size in the corresponding FL block of
			 *    the ILT client is set to 0 - no ILT lines are
			 *    provisioned and no ILT memory allocated.
			 *
			 * 2. The start-line of said block is set to the
			 *    start line of the matching working memory
			 *    block in the ILT client. This is later used to
			 *    configure the CDU segment offset registers and
			 *    results in an FL command for TIDs of this
			 *    segment behaving as regular load commands
			 *    (loading TIDs from the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk =
		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			qed_ilt_cli_blk_fill(p_cli, p_blk,
					     curr_line, total, task_size);
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
		p_cli->vf_total_lines = curr_line -
		    p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);

			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids,
		   qm_iids.vf_cids,
		   qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	qed_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = max_t(u32, total,
				      SRC_MIN_NUM_ELEMS);

		total = roundup_pow_of_two(local_max);

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * sizeof(struct src_ent),
				     sizeof(struct src_ent));

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++)
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_TM);
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TSDM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT))
		return -EINVAL;

	return 0;
}

u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct qed_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct qed_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, QED_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}

static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 i;

	if (!p_mngr->t2)
		return;

	for (i = 0; i < p_mngr->t2_num_pages; i++)
		if (p_mngr->t2[i].p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_mngr->t2[i].size,
					  p_mngr->t2[i].p_virt,
					  p_mngr->t2[i].p_phys);

	kfree(p_mngr->t2);
	p_mngr->t2 = NULL;
}

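/* Allocate the searcher T2 table: a set of DMA-coherent pages holding one
 * src_ent per connection, where each entry's 'next' field is the physical
 * address of the following entry (big-endian), chained across pages.
 */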
static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct qed_ilt_client_cfg *p_src;
	struct qed_src_iids src_iids;
	struct qed_dma_mem *p_t2;
	int rc;

	memset(&src_iids, 0, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher, leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return 0;

	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
			     GFP_KERNEL);
	if (!p_mngr->t2) {
		rc = -ENOMEM;
		goto t2_fail;
	}

	/* allocate t2 pages */
	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 size = min_t(u32, total_size, psz);
		void **p_virt = &p_mngr->t2[i].p_virt;

		*p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
					      size, &p_mngr->t2[i].p_phys,
					      GFP_KERNEL);
		if (!p_mngr->t2[i].p_virt) {
			rc = -ENOMEM;
			goto t2_fail;
		}
		p_mngr->t2[i].size = size;
		total_size -= size;
	}

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);

	p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;

	p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
	p_mngr->last_free = (u64) p_t2->p_phys +
	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 ent_num = min_t(u32,
				    ent_per_page,
				    conn_num);
		struct src_ent *entries = p_mngr->t2[i].p_virt;
		u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
			entries[j].next = cpu_to_be64(val);
		}

		if (i < p_mngr->t2_num_pages - 1)
			val = (u64) p_mngr->t2[i + 1].p_phys;
		else
			val = 0;
		entries[j].next = cpu_to_be64(val);

		conn_num -= ent_num;
	}

	return 0;

t2_fail:
	qed_cxt_src_t2_free(p_hwfn);
	return rc;
}

#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < ILT_CLI_MAX; pos++)		\
		if (!clients[pos].active) {		\
			continue;			\
		} else					\

/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
		size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

	return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->p_virt,
					  p_dma->p_phys);
		p_dma->p_virt = NULL;
	}
	kfree(p_mngr->ilt_shadow);
}

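/* Allocate the DMA-coherent pages backing a single ILT client block and
 * record them in the ILT shadow table. Lines covered by dynamic allocation
 * (RDMA personalities, CDUT/TSDM clients) are skipped here.
 */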
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return 0;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left, p_blk->real_size_in_page);
		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
					     &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		ilt_shadow[line].p_phys = p_phys;
		ilt_shadow[line].p_virt = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}

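/* Allocate the ILT shadow table and the backing memory for every active
 * ILT client: the PF blocks first, then the per-VF blocks for each VF.
 */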
static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			kfree(p_mngr->acquired_vf[type][vf].cid_map);
			p_mngr->acquired_vf[type][vf].max_count = 0;
			p_mngr->acquired_vf[type][vf].start_cid = 0;
		}
	}
}

static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
			 u32 type,
			 u32 cid_start,
			 u32 cid_count, struct qed_cid_acquired_map *p_map)
{
	u32 size;

	if (!cid_count)
		return 0;

	size = DIV_ROUND_UP(cid_count,
			    sizeof(unsigned long) * BITS_PER_BYTE) *
	       sizeof(unsigned long);
	p_map->cid_map = kzalloc(size, GFP_KERNEL);
	if (!p_map->cid_map)
		return -ENOMEM;

	p_map->max_count = cid_count;
	p_map->start_cid = cid_start;

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Type %08x start: %08x count %08x\n",
		   type, p_map->start_cid, p_map->max_count);

	return 0;
}

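/* Allocate the acquired-CID bitmaps for every connection type: one map for
 * the PF and one per VF, each starting at that protocol's first CID.
 */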
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0, vf_start_cid = 0;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
		struct qed_cid_acquired_map *p_map;

		/* Handle PF maps */
		p_map = &p_mngr->acquired[type];
		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
					     p_cfg->cid_count, p_map))
			goto cid_map_fail;

		/* Handle VF maps */
		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			if (qed_cid_map_alloc_single(p_hwfn, type,
						     vf_start_cid,
						     p_cfg->cids_per_vf, p_map))
				goto cid_map_fail;
		}

		start_cid += p_cfg->cid_count;
		vf_start_cid += p_cfg->cids_per_vf;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr)
		return -ENOMEM;

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
	/* default ILT page size for all clients is 64K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->cdev->p_iov_info)
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
	/* Initialize the dynamic ILT allocation mutex */
	mutex_init(&p_mngr->mutex);

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate the T2 table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_cxt_src_t2_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	struct qed_conn_type_cfg *p_cfg;
	int type;
	u32 len;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 vf;

		p_cfg = &p_mngr->conn_cfg[type];
		if (p_cfg->cid_count) {
			p_map = &p_mngr->acquired[type];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}

		if (!p_cfg->cids_per_vf)
			continue;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}
	}
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)

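/* Program the common CDU runtime registers: context size, block waste and
 * contexts-per-block for the CDUC client and for both CDUT task types.
 */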
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001396static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
1397{
1398 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1399
1400 /* CDUC - connection configuration */
1401 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1402 cxt_size = CONN_CXT_SIZE(p_hwfn);
1403 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1404 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1405
1406 SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1407 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1408 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1409 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001410
1411 /* CDUT - type-0 tasks configuration */
1412 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1413 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1414 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1415 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1416
1417 /* cxt size and block-waste are multipes of 8 */
1418 cdu_params = 0;
1419 SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1420 SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1421 SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1422 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1423
1424 /* CDUT - type-1 tasks configuration */
1425 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1426 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1427 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1428
1429 /* cxt size and block-waste are multipes of 8 */
1430 cdu_params = 0;
1431 SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1432 SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1433 SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1434 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1435}
1436
1437/* CDU PF */
1438#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1439#define CDU_SEG_REG_TYPE_MASK 0x1
1440#define CDU_SEG_REG_OFFSET_SHIFT 0
1441#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1442
1443static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1444{
1445 struct qed_ilt_client_cfg *p_cli;
1446 struct qed_tid_seg *p_seg;
1447 u32 cdu_seg_params, offset;
1448 int i;
1449
1450 static const u32 rt_type_offset_arr[] = {
1451 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1452 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1453 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1454 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1455 };
1456
1457 static const u32 rt_type_offset_fl_arr[] = {
1458 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1459 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1460 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1461 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1462 };
1463
1464 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1465
1466 /* There are initializations only for CDUT during pf Phase */
1467 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1468 /* Segment 0 */
1469 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1470 if (!p_seg)
1471 continue;
1472
1473 /* Note: start_line is already adjusted for the CDU
1474 * segment register granularity, so we just need to
1475 * divide. Adjustment is implicit as we assume ILT
1476 * Page size is larger than 32K!
1477 */
1478 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1479 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1480 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1481
1482 cdu_seg_params = 0;
1483 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1484 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1485 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1486
1487 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1488 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1489 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1490
1491 cdu_seg_params = 0;
1492 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1493 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1494 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1495 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001496}
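/* Illustrative offset calculation (hypothetical numbers): with a 64KiB ILT
 * page, a segment block starting 3 lines past the client's first line, and
 * assuming CDUT_SEG_ALIGNMET_IN_BYTES is 32KiB, the register value becomes
 *	offset = (65536 * 3) / 32768 = 6
 * i.e. the segment base expressed in alignment units from the start of the
 * CDUT client.
 */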
1497
Tomer Tayarda090912017-12-27 19:30:07 +02001498void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
1499 struct qed_ptt *p_ptt, bool is_pf_loading)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001500{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001501 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
Tomer Tayarda090912017-12-27 19:30:07 +02001502 struct qed_qm_pf_rt_init_params params;
1503 struct qed_mcp_link_state *p_link;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001504 struct qed_qm_iids iids;
1505
1506 memset(&iids, 0, sizeof(iids));
1507 qed_cxt_qm_iids(p_hwfn, &iids);
1508
Tomer Tayarda090912017-12-27 19:30:07 +02001509 p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
1510
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001511 memset(&params, 0, sizeof(params));
1512 params.port_id = p_hwfn->port_id;
1513 params.pf_id = p_hwfn->rel_pf_id;
1514 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
Tomer Tayarda090912017-12-27 19:30:07 +02001515 params.is_pf_loading = is_pf_loading;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001516 params.num_pf_cids = iids.cids;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001517 params.num_vf_cids = iids.vf_cids;
Mintz, Yuvalc9f0523b2017-05-09 15:07:49 +03001518 params.num_tids = iids.tids;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001519 params.start_pq = qm_info->start_pq;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001520 params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1521 params.num_vf_pqs = qm_info->num_vf_pqs;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001522 params.start_vport = qm_info->start_vport;
1523 params.num_vports = qm_info->num_vports;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001524 params.pf_wfq = qm_info->pf_wfq;
1525 params.pf_rl = qm_info->pf_rl;
Tomer Tayarda090912017-12-27 19:30:07 +02001526 params.link_speed = p_link->speed;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001527 params.pq_params = qm_info->qm_pq_params;
1528 params.vport_params = qm_info->qm_vport_params;
1529
Rahul Verma15582962017-04-06 15:58:29 +03001530 qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001531}
1532
1533/* CM PF */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001534void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001535{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001536 /* XCM pure-LB queue */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001537 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1538 qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001539}
1540
1541/* DQ PF */
1542static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1543{
1544 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001545 u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001546
1547 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1548 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1549
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001550 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1551 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1552
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001553 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1554 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1555
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001556 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1557 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1558
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001559 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1560 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1561
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001562 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1563 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1564
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001565 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1566 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1567
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001568 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1569 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1570
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001571 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1572 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1573
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001574 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1575 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1576
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001577 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1578 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001579
1580 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1581 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1582
1583	/* Connection types 6 & 7 are not in use, yet they must be configured
1584	 * with the highest possible connection values. Leaving them unconfigured
1585	 * means the hardware defaults are used, and with a large number of cids
1586	 * a bug may occur if those defaults end up smaller than dq_pf_max_cid /
1587	 * dq_vf_max_cid.
1588 */
1589 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1590 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1591
1592 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1593 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001594}
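/* Illustrative only: DQ_RANGE_SHIFT is 4, so every DORQ "max ICID" value is
 * programmed in units of 16 CIDs and the registers form a cumulative upper
 * bound per connection type. Assuming hypothetical counts of 4096 CIDs for
 * type 0 and 1024 for type 1:
 *	after type 0: dq_pf_max_cid = 4096 >> 4         = 256
 *	after type 1: dq_pf_max_cid = 256 + (1024 >> 4) = 320
 * Types 6 and 7 then simply repeat the final accumulated values.
 */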
1595
1596static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1597{
1598 struct qed_ilt_client_cfg *ilt_clients;
1599 int i;
1600
1601 ilt_clients = p_hwfn->p_cxt_mngr->clients;
1602 for_each_ilt_valid_client(i, ilt_clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001603 STORE_RT_REG(p_hwfn,
1604 ilt_clients[i].first.reg,
1605 ilt_clients[i].first.val);
1606 STORE_RT_REG(p_hwfn,
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001607 ilt_clients[i].last.reg, ilt_clients[i].last.val);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001608 STORE_RT_REG(p_hwfn,
1609 ilt_clients[i].p_size.reg,
1610 ilt_clients[i].p_size.val);
1611 }
1612}
1613
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001614static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1615{
1616 struct qed_ilt_client_cfg *p_cli;
1617 u32 blk_factor;
1618
1619	/* For simplicity we set the 'block' to be an ILT page */
1620 if (p_hwfn->cdev->p_iov_info) {
1621 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1622
1623 STORE_RT_REG(p_hwfn,
1624 PSWRQ2_REG_VF_BASE_RT_OFFSET,
1625 p_iov->first_vf_in_pf);
1626 STORE_RT_REG(p_hwfn,
1627 PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1628 p_iov->first_vf_in_pf + p_iov->total_vfs);
1629 }
1630
1631 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1632 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1633 if (p_cli->active) {
1634 STORE_RT_REG(p_hwfn,
1635 PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1636 blk_factor);
1637 STORE_RT_REG(p_hwfn,
1638 PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1639 p_cli->pf_total_lines);
1640 STORE_RT_REG(p_hwfn,
1641 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1642 p_cli->vf_total_lines);
1643 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001644
1645 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1646 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1647 if (p_cli->active) {
1648 STORE_RT_REG(p_hwfn,
1649 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1650 blk_factor);
1651 STORE_RT_REG(p_hwfn,
1652 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1653 p_cli->pf_total_lines);
1654 STORE_RT_REG(p_hwfn,
1655 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1656 p_cli->vf_total_lines);
1657 }
1658
1659 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1660 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1661 if (p_cli->active) {
1662 STORE_RT_REG(p_hwfn,
1663 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1664 STORE_RT_REG(p_hwfn,
1665 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1666 p_cli->pf_total_lines);
1667 STORE_RT_REG(p_hwfn,
1668 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1669 p_cli->vf_total_lines);
1670 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001671}
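/* Illustrative only: the blocks factor tells PSWRQ2 the block size as a
 * power-of-two multiple of 1KiB. With the default 64KiB ILT page:
 *	blk_factor = ilog2(65536 >> 10) = ilog2(64) = 6
 */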
1672
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001673/* ILT (PSWRQ2) PF */
1674static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1675{
1676 struct qed_ilt_client_cfg *clients;
1677 struct qed_cxt_mngr *p_mngr;
1678 struct qed_dma_mem *p_shdw;
1679 u32 line, rt_offst, i;
1680
1681 qed_ilt_bounds_init(p_hwfn);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001682 qed_ilt_vf_bounds_init(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001683
1684 p_mngr = p_hwfn->p_cxt_mngr;
1685 p_shdw = p_mngr->ilt_shadow;
1686 clients = p_hwfn->p_cxt_mngr->clients;
1687
1688 for_each_ilt_valid_client(i, clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001689		/* Client's first val and the RT array are absolute; the ILT
1690		 * shadow's lines are relative.
1691 */
1692 line = clients[i].first.val - p_mngr->pf_start_line;
1693 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1694 clients[i].first.val * ILT_ENTRY_IN_REGS;
1695
1696 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1697 line++, rt_offst += ILT_ENTRY_IN_REGS) {
1698 u64 ilt_hw_entry = 0;
1699
1700			/* p_virt could be NULL in case of dynamic
1701			 * allocation
1702 */
1703 if (p_shdw[line].p_virt) {
1704 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1705 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1706 (p_shdw[line].p_phys >> 12));
1707
1708 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1709 "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1710 rt_offst, line, i,
1711 (u64)(p_shdw[line].p_phys >> 12));
1712 }
1713
1714 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1715 }
1716 }
1717}
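/* Note on the entry encoding (from the ILT_ENTRY_* definitions at the top of
 * this file): the 64-bit entry carries the DMA address as a 4KiB page number
 * (p_phys >> 12) starting at bit 0 plus a separate valid bit, so a zeroed
 * entry marks a shadow line that has no backing page yet. Hypothetical
 * address for illustration:
 *	p_phys = 0x12345000  ->  PHY_ADDR field = 0x12345, VALID = 1
 */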
1718
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001719/* SRC (Searcher) PF */
1720static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1721{
1722 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1723 u32 rounded_conn_num, conn_num, conn_max;
1724 struct qed_src_iids src_iids;
1725
1726 memset(&src_iids, 0, sizeof(src_iids));
1727 qed_cxt_src_iids(p_mngr, &src_iids);
1728 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1729 if (!conn_num)
1730 return;
1731
1732 conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1733 rounded_conn_num = roundup_pow_of_two(conn_max);
1734
1735 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1736 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1737 ilog2(rounded_conn_num));
1738
1739 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1740 p_hwfn->p_cxt_mngr->first_free);
1741 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1742 p_hwfn->p_cxt_mngr->last_free);
1743}
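/* Illustrative only: the searcher hash size must be a power of two, with a
 * floor of SRC_MIN_NUM_ELEMS (256). For a hypothetical conn_num of 300:
 *	conn_max         = max(300, 256)           = 300
 *	rounded_conn_num = roundup_pow_of_two(300) = 512
 *	hash bits        = ilog2(512)              = 9
 */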
1744
1745/* Timers PF */
1746#define TM_CFG_NUM_IDS_SHIFT 0
1747#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
1748#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
1749#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
1750#define TM_CFG_PARENT_PF_SHIFT 25
1751#define TM_CFG_PARENT_PF_MASK 0x7ULL
1752
1753#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
1754#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
1755
1756#define TM_CFG_TID_OFFSET_SHIFT 30
1757#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
1758#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
1759#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
1760
1761static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1762{
1763 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1764 u32 active_seg_mask = 0, tm_offset, rt_reg;
1765 struct qed_tm_iids tm_iids;
1766 u64 cfg_word;
1767 u8 i;
1768
1769 memset(&tm_iids, 0, sizeof(tm_iids));
Michal Kalderon44531ba2017-04-03 12:21:10 +03001770 qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001771
1772 /* @@@TBD No pre-scan for now */
1773
1774 /* Note: We assume consecutive VFs for a PF */
1775 for (i = 0; i < p_mngr->vf_count; i++) {
1776 cfg_word = 0;
1777 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1778 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1779 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1780 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1781 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1782 (sizeof(cfg_word) / sizeof(u32)) *
1783 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1784 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1785 }
1786
1787 cfg_word = 0;
1788 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1789 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1790 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
1791 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1792
1793 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1794 (sizeof(cfg_word) / sizeof(u32)) *
1795 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1796 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1797
1798	/* Enable scan */
1799 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1800 tm_iids.pf_cids ? 0x1 : 0x0);
1801
1802 /* @@@TBD how to enable the scan for the VFs */
1803
1804 tm_offset = tm_iids.per_vf_cids;
1805
1806 /* Note: We assume consecutive VFs for a PF */
1807 for (i = 0; i < p_mngr->vf_count; i++) {
1808 cfg_word = 0;
1809 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1810 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1811 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1812 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1813 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1814
1815 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1816 (sizeof(cfg_word) / sizeof(u32)) *
1817 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1818
1819 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1820 }
1821
1822 tm_offset = tm_iids.pf_cids;
1823 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1824 cfg_word = 0;
1825 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1826 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1827 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1828 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1829 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1830
1831 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1832 (sizeof(cfg_word) / sizeof(u32)) *
1833 (NUM_OF_VFS(p_hwfn->cdev) +
1834 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1835
1836 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001837 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001838
1839 tm_offset += tm_iids.pf_tids[i];
1840 }
1841
Kalderon, Michalc851a9d2017-07-02 10:29:21 +03001842 if (QED_IS_RDMA_PERSONALITY(p_hwfn))
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001843 active_seg_mask = 0;
1844
1845 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1846
1847 /* @@@TBD how to enable the scan for the VFs */
1848}
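/* Layout of the 64-bit timers cfg_word built above, per the TM_CFG_* fields
 * defined in this file: bits [15:0] number of IDs, [24:16] pre-scan offset,
 * [27:25] parent PF, and then either [38:30] CID pre-scan rows (connection
 * rows) or [48:30] TID offset plus [57:49] TID pre-scan rows (task rows).
 */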
1849
Arun Easi1e128c82017-02-15 06:28:22 -08001850static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1851{
1852 if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1853 p_hwfn->pf_params.fcoe_pf_params.is_target)
1854 STORE_RT_REG(p_hwfn,
1855 PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1856}
1857
1858static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1859{
1860 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1861 struct qed_conn_type_cfg *p_fcoe;
1862 struct qed_tid_seg *p_tid;
1863
1864 p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1865
1866 /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1867 if (!p_fcoe->cid_count)
1868 return;
1869
1870 p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1871 if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1872 STORE_RT_REG_AGG(p_hwfn,
1873 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1874 p_tid->count);
1875 } else {
1876 STORE_RT_REG_AGG(p_hwfn,
1877 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1878 p_tid->count);
1879 }
1880}
1881
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001882void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1883{
1884 qed_cdu_init_common(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001885 qed_prs_init_common(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001886}
1887
Rahul Verma15582962017-04-06 15:58:29 +03001888void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001889{
Tomer Tayarda090912017-12-27 19:30:07 +02001890 qed_qm_init_pf(p_hwfn, p_ptt, true);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001891 qed_cm_init_pf(p_hwfn);
1892 qed_dq_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001893 qed_cdu_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001894 qed_ilt_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001895 qed_src_init_pf(p_hwfn);
1896 qed_tm_init_pf(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001897 qed_prs_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001898}
1899
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001900int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1901 enum protocol_type type, u32 *p_cid, u8 vfid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001902{
1903 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001904 struct qed_cid_acquired_map *p_map;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001905 u32 rel_cid;
1906
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001907 if (type >= MAX_CONN_TYPES) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001908 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1909 return -EINVAL;
1910 }
1911
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001912 if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
1913 DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
1914 return -EINVAL;
1915 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001916
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001917 /* Determine the right map to take this CID from */
1918 if (vfid == QED_CXT_PF_CID)
1919 p_map = &p_mngr->acquired[type];
1920 else
1921 p_map = &p_mngr->acquired_vf[type][vfid];
1922
1923 if (!p_map->cid_map) {
1924 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1925 return -EINVAL;
1926 }
1927
1928 rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
1929
1930 if (rel_cid >= p_map->max_count) {
Yuval Mintz1a635e42016-08-15 10:42:43 +03001931 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001932 return -EINVAL;
1933 }
1934
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001935 __set_bit(rel_cid, p_map->cid_map);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001936
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001937 *p_cid = rel_cid + p_map->start_cid;
1938
1939 DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1940 "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1941 *p_cid, rel_cid, vfid, type);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001942
1943 return 0;
1944}
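/* Usage sketch (hypothetical caller, for illustration only): a PF-owned L2
 * connection would typically be obtained and returned with
 *
 *	u32 cid;
 *
 *	if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid))
 *		... use cid ...
 *	qed_cxt_release_cid(p_hwfn, cid);
 *
 * while a VF-owned CID goes through _qed_cxt_acquire_cid() /
 * _qed_cxt_release_cid() with the VF's relative index instead of
 * QED_CXT_PF_CID.
 */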
1945
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001946int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1947 enum protocol_type type, u32 *p_cid)
1948{
1949 return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
1950}
1951
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001952static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001953 u32 cid,
1954 u8 vfid,
1955 enum protocol_type *p_type,
1956 struct qed_cid_acquired_map **pp_map)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001957{
1958 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001959 u32 rel_cid;
1960
1961 /* Iterate over protocols and find matching cid range */
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001962 for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1963 if (vfid == QED_CXT_PF_CID)
1964 *pp_map = &p_mngr->acquired[*p_type];
1965 else
1966 *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001967
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001968 if (!((*pp_map)->cid_map))
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001969 continue;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001970 if (cid >= (*pp_map)->start_cid &&
1971 cid < (*pp_map)->start_cid + (*pp_map)->max_count)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001972 break;
1973 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001974
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001975 if (*p_type == MAX_CONN_TYPES) {
1976 DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
1977 goto fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001978 }
1979
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001980 rel_cid = cid - (*pp_map)->start_cid;
1981 if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
1982		DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
1983 cid, vfid);
1984 goto fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001985 }
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001986
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001987 return true;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001988fail:
1989 *p_type = MAX_CONN_TYPES;
1990 *pp_map = NULL;
1991 return false;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001992}
1993
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001994void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001995{
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001996 struct qed_cid_acquired_map *p_map = NULL;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001997 enum protocol_type type;
1998 bool b_acquired;
1999 u32 rel_cid;
2000
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002001 if (vfid != QED_CXT_PF_CID && vfid > MAX_NUM_VFS) {
2002 DP_NOTICE(p_hwfn,
2003 "Trying to return incorrect CID belonging to VF %02x\n",
2004 vfid);
2005 return;
2006 }
2007
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002008 /* Test acquired and find matching per-protocol map */
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002009 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
2010 &type, &p_map);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002011
2012 if (!b_acquired)
2013 return;
2014
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002015 rel_cid = cid - p_map->start_cid;
2016 clear_bit(rel_cid, p_map->cid_map);
2017
2018 DP_VERBOSE(p_hwfn, QED_MSG_CXT,
2019 "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
2020 cid, rel_cid, vfid, type);
2021}
2022
2023void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
2024{
2025 _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002026}
2027
Yuval Mintz1a635e42016-08-15 10:42:43 +03002028int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002029{
2030 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002031 struct qed_cid_acquired_map *p_map = NULL;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002032 u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
2033 enum protocol_type type;
2034 bool b_acquired;
2035
2036 /* Test acquired and find matching per-protocol map */
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002037 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
2038 QED_CXT_PF_CID, &type, &p_map);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002039
2040 if (!b_acquired)
2041 return -EINVAL;
2042
2043	/* set the protocol type */
2044 p_info->type = type;
2045
2046 /* compute context virtual pointer */
2047 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
2048
2049 conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
2050 cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
2051 line = p_info->iid / cxts_per_p;
2052
2053 /* Make sure context is allocated (dynamic allocation) */
2054 if (!p_mngr->ilt_shadow[line].p_virt)
2055 return -EINVAL;
2056
2057 p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
2058 p_info->iid % cxts_per_p * conn_cxt_size;
2059
2060 DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
2061 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
2062 p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
2063
2064 return 0;
2065}
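/* Illustrative only (hypothetical sizes): with a 64KiB ILT page and a
 * 320-byte connection context, cxts_per_p = 65536 / 320 = 204, so iid 1000
 * maps to shadow line 1000 / 204 = 4 at byte offset
 * (1000 % 204) * 320 = 184 * 320 = 58880 within that page.
 */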
2066
Yuval Mintz8c93bea2016-10-13 22:57:03 +03002067static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
Ram Amranif9dc4d12017-04-03 12:21:13 +03002068 struct qed_rdma_pf_params *p_params,
2069 u32 num_tasks)
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002070{
Ram Amranif9dc4d12017-04-03 12:21:13 +03002071 u32 num_cons, num_qps, num_srqs;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002072 enum protocol_type proto;
2073
Yuval Bason39dbc642018-06-03 19:13:07 +03002074 num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002075
Michal Kalderone0a8f9d2017-09-24 12:09:42 +03002076 if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
2077 DP_NOTICE(p_hwfn,
2078 "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
2079 p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
2080 }
2081
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002082 switch (p_hwfn->hw_info.personality) {
Kalderon, Michal5d7dc962017-07-02 10:29:31 +03002083 case QED_PCI_ETH_IWARP:
2084 /* Each QP requires one connection */
2085 num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
2086 proto = PROTOCOLID_IWARP;
2087 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002088 case QED_PCI_ETH_ROCE:
2089 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
2090 num_cons = num_qps * 2; /* each QP requires two connections */
2091 proto = PROTOCOLID_ROCE;
2092 break;
2093 default:
2094 return;
2095 }
2096
2097 if (num_cons && num_tasks) {
2098 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
2099
2100		/* Deliberately passing RoCE for the task id, since
2101		 * iWARP / RoCE share the task id.
2102 */
2103 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
2104 QED_CXT_ROCE_TID_SEG, 1,
2105 num_tasks, false);
2106 qed_cxt_set_srq_count(p_hwfn, num_srqs);
2107 } else {
2108 DP_INFO(p_hwfn->cdev,
2109 "RDMA personality used without setting params!\n");
2110 }
2111}
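/* Example of the resulting CID demand (hypothetical request, illustration
 * only): a RoCE personality asking for 4096 QPs reserves
 *	num_cons = 4096 * 2 = 8192
 * PROTOCOLID_ROCE connections, since the code above budgets two connections
 * per RoCE QP but only one per iWARP QP.
 */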
2112
Ram Amranif9dc4d12017-04-03 12:21:13 +03002113int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002114{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002115 /* Set the number of required CORE connections */
2116 u32 core_cids = 1; /* SPQ */
2117
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002118 if (p_hwfn->using_ll2)
2119 core_cids += 4;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002120 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002121
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002122 switch (p_hwfn->hw_info.personality) {
Kalderon, Michal5d7dc962017-07-02 10:29:31 +03002123 case QED_PCI_ETH_RDMA:
2124 case QED_PCI_ETH_IWARP:
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002125 case QED_PCI_ETH_ROCE:
2126 {
Ram Amranif9dc4d12017-04-03 12:21:13 +03002127 qed_rdma_set_pf_params(p_hwfn,
2128 &p_hwfn->
2129 pf_params.rdma_pf_params,
2130 rdma_tasks);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002131		/* no need for break since RoCE coexists with Ethernet */
2132 }
2133 case QED_PCI_ETH:
2134 {
2135 struct qed_eth_pf_params *p_params =
2136 &p_hwfn->pf_params.eth_pf_params;
2137
Mintz, Yuval08bc8f12017-06-04 13:31:06 +03002138 if (!p_params->num_vf_cons)
2139 p_params->num_vf_cons =
2140 ETH_PF_PARAMS_VF_CONS_DEFAULT;
2141 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2142 p_params->num_cons,
2143 p_params->num_vf_cons);
Chopra, Manishd51e4af2017-04-13 04:54:44 -07002144 p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002145 break;
2146 }
Arun Easi1e128c82017-02-15 06:28:22 -08002147 case QED_PCI_FCOE:
2148 {
2149 struct qed_fcoe_pf_params *p_params;
2150
2151 p_params = &p_hwfn->pf_params.fcoe_pf_params;
2152
2153 if (p_params->num_cons && p_params->num_tasks) {
2154 qed_cxt_set_proto_cid_count(p_hwfn,
2155 PROTOCOLID_FCOE,
2156 p_params->num_cons,
2157 0);
2158
2159 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
2160 QED_CXT_FCOE_TID_SEG, 0,
2161 p_params->num_tasks, true);
2162 } else {
2163 DP_INFO(p_hwfn->cdev,
2164 "Fcoe personality used without setting params!\n");
2165 }
2166 break;
2167 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002168 case QED_PCI_ISCSI:
2169 {
2170 struct qed_iscsi_pf_params *p_params;
2171
2172 p_params = &p_hwfn->pf_params.iscsi_pf_params;
2173
2174 if (p_params->num_cons && p_params->num_tasks) {
2175 qed_cxt_set_proto_cid_count(p_hwfn,
2176 PROTOCOLID_ISCSI,
2177 p_params->num_cons,
2178 0);
2179
2180 qed_cxt_set_proto_tid_count(p_hwfn,
2181 PROTOCOLID_ISCSI,
2182 QED_CXT_ISCSI_TID_SEG,
2183 0,
2184 p_params->num_tasks,
2185 true);
2186 } else {
2187 DP_INFO(p_hwfn->cdev,
2188 "Iscsi personality used without setting params!\n");
2189 }
2190 break;
2191 }
2192 default:
2193 return -EINVAL;
2194 }
2195
2196 return 0;
2197}
2198
2199int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2200 struct qed_tid_mem *p_info)
2201{
2202 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2203 u32 proto, seg, total_lines, i, shadow_line;
2204 struct qed_ilt_client_cfg *p_cli;
2205 struct qed_ilt_cli_blk *p_fl_seg;
2206 struct qed_tid_seg *p_seg_info;
2207
2208 /* Verify the personality */
2209 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002210 case QED_PCI_FCOE:
2211 proto = PROTOCOLID_FCOE;
2212 seg = QED_CXT_FCOE_TID_SEG;
2213 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002214 case QED_PCI_ISCSI:
2215 proto = PROTOCOLID_ISCSI;
2216 seg = QED_CXT_ISCSI_TID_SEG;
2217 break;
2218 default:
2219 return -EINVAL;
2220 }
2221
2222 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2223 if (!p_cli->active)
2224 return -EINVAL;
2225
2226 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2227 if (!p_seg_info->has_fl_mem)
2228 return -EINVAL;
2229
2230 p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2231 total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2232 p_fl_seg->real_size_in_page);
2233
2234 for (i = 0; i < total_lines; i++) {
2235 shadow_line = i + p_fl_seg->start_line -
2236 p_hwfn->p_cxt_mngr->pf_start_line;
2237 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
2238 }
2239 p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2240 p_fl_seg->real_size_in_page;
2241 p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2242 p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2243 p_info->tid_size;
2244
2245 return 0;
2246}
2247
2248/* This function is very RoCE-oriented; if another protocol wants this
2249 * feature in the future, the function will need to be made more generic
2250 */
2251int
2252qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2253 enum qed_cxt_elem_type elem_type, u32 iid)
2254{
2255 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2256 struct qed_ilt_client_cfg *p_cli;
2257 struct qed_ilt_cli_blk *p_blk;
2258 struct qed_ptt *p_ptt;
2259 dma_addr_t p_phys;
2260 u64 ilt_hw_entry;
2261 void *p_virt;
2262 int rc = 0;
2263
2264 switch (elem_type) {
2265 case QED_ELEM_CXT:
2266 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2267 elem_size = CONN_CXT_SIZE(p_hwfn);
2268 p_blk = &p_cli->pf_blks[CDUC_BLK];
2269 break;
2270 case QED_ELEM_SRQ:
2271 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2272 elem_size = SRQ_CXT_SIZE;
2273 p_blk = &p_cli->pf_blks[SRQ_BLK];
2274 break;
2275 case QED_ELEM_TASK:
2276 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2277 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2278 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2279 break;
2280 default:
2281		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2282 return -EINVAL;
2283 }
2284
2285 /* Calculate line in ilt */
2286 hw_p_size = p_cli->p_size.val;
2287 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2288 line = p_blk->start_line + (iid / elems_per_p);
2289 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2290
2291 /* If line is already allocated, do nothing, otherwise allocate it and
2292 * write it to the PSWRQ2 registers.
2293 * This section can be run in parallel from different contexts and thus
2294 * a mutex protection is needed.
2295 */
2296
2297 mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2298
2299 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2300 goto out0;
2301
2302 p_ptt = qed_ptt_acquire(p_hwfn);
2303 if (!p_ptt) {
2304 DP_NOTICE(p_hwfn,
2305 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2306 rc = -EBUSY;
2307 goto out0;
2308 }
2309
Himanshu Jha5f58dff2017-12-30 21:07:04 +05302310 p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
2311 p_blk->real_size_in_page, &p_phys,
2312 GFP_KERNEL);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002313 if (!p_virt) {
2314 rc = -ENOMEM;
2315 goto out1;
2316 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002317
2318 /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2319 * to compensate for a HW bug, but it is configured even if DIF is not
2320 * enabled. This is harmless and allows us to avoid a dedicated API. We
2321 * configure the field for all of the contexts on the newly allocated
2322 * page.
2323 */
2324 if (elem_type == QED_ELEM_TASK) {
2325 u32 elem_i;
2326 u8 *elem_start = (u8 *)p_virt;
2327 union type1_task_context *elem;
2328
2329 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2330 elem = (union type1_task_context *)elem_start;
2331 SET_FIELD(elem->roce_ctx.tdif_context.flags1,
Tomer Tayara2e76992017-12-27 19:30:05 +02002332 TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002333 elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2334 }
2335 }
2336
2337 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2338 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2339 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2340 p_blk->real_size_in_page;
2341
2342 /* compute absolute offset */
2343 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2344 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2345
2346 ilt_hw_entry = 0;
2347 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2348 SET_FIELD(ilt_hw_entry,
2349 ILT_ENTRY_PHY_ADDR,
2350 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2351
2352 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2353 qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2354 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2355
2356 if (elem_type == QED_ELEM_CXT) {
2357 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2358 elems_per_p;
2359
2360 /* Update the relevant register in the parser */
2361 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2362 last_cid_allocated - 1);
2363
2364 if (!p_hwfn->b_rdma_enabled_in_prs) {
Kalderon, Michalc851a9d2017-07-02 10:29:21 +03002365 /* Enable RDMA search */
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002366 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2367 p_hwfn->b_rdma_enabled_in_prs = true;
2368 }
2369 }
2370
2371out1:
2372 qed_ptt_release(p_hwfn, p_ptt);
2373out0:
2374 mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2375
2376 return rc;
2377}
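/* Illustrative only (hypothetical sizes): for QED_ELEM_SRQ with a 64KiB ILT
 * page and an assumed SRQ_CXT_SIZE of 128 bytes, elems_per_p = 65536 / 128 =
 * 512, so iid 1000 falls on line p_blk->start_line + 1000 / 512 =
 * start_line + 1. Only when that line's shadow page is still unallocated is
 * a new DMA-coherent page mapped and its ILT entry written via DMAE.
 */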
2378
2379/* This function is very RoCE-oriented; if another protocol wants this
2380 * feature in the future, the function will need to be made more generic
2381 */
2382static int
2383qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2384 enum qed_cxt_elem_type elem_type,
2385 u32 start_iid, u32 count)
2386{
2387 u32 start_line, end_line, shadow_start_line, shadow_end_line;
2388 u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2389 struct qed_ilt_client_cfg *p_cli;
2390 struct qed_ilt_cli_blk *p_blk;
2391 u32 end_iid = start_iid + count;
2392 struct qed_ptt *p_ptt;
2393 u64 ilt_hw_entry = 0;
2394 u32 i;
2395
2396 switch (elem_type) {
2397 case QED_ELEM_CXT:
2398 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2399 elem_size = CONN_CXT_SIZE(p_hwfn);
2400 p_blk = &p_cli->pf_blks[CDUC_BLK];
2401 break;
2402 case QED_ELEM_SRQ:
2403 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2404 elem_size = SRQ_CXT_SIZE;
2405 p_blk = &p_cli->pf_blks[SRQ_BLK];
2406 break;
2407 case QED_ELEM_TASK:
2408 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2409 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2410 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2411 break;
2412 default:
2413		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2414 return -EINVAL;
2415 }
2416
2417 /* Calculate line in ilt */
2418 hw_p_size = p_cli->p_size.val;
2419 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2420 start_line = p_blk->start_line + (start_iid / elems_per_p);
2421 end_line = p_blk->start_line + (end_iid / elems_per_p);
2422 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2423 end_line--;
2424
2425 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2426 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2427
2428 p_ptt = qed_ptt_acquire(p_hwfn);
2429 if (!p_ptt) {
2430 DP_NOTICE(p_hwfn,
2431 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2432 return -EBUSY;
2433 }
2434
2435 for (i = shadow_start_line; i < shadow_end_line; i++) {
2436 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2437 continue;
2438
2439 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2440 p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2441 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2442 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2443
2444 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2445 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2446 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2447
2448 /* compute absolute offset */
2449 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2450 ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2451 ILT_ENTRY_IN_REGS);
2452
2453 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2454 * wide-bus.
2455 */
2456 qed_dmae_host2grc(p_hwfn, p_ptt,
2457 (u64) (uintptr_t) &ilt_hw_entry,
2458 reg_offset,
2459 sizeof(ilt_hw_entry) / sizeof(u32),
2460 0);
2461 }
2462
2463 qed_ptt_release(p_hwfn, p_ptt);
2464
2465 return 0;
2466}
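/* Note: ilt_hw_entry stays zero throughout this function, so every freed
 * line is rewritten through DMAE with an all-zero (invalid) ILT entry,
 * mirroring qed_cxt_dynamic_ilt_alloc() which only sets the VALID and
 * PHY_ADDR fields once a page is actually mapped.
 */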
2467
2468int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2469{
2470 int rc;
2471 u32 cid;
2472
2473 /* Free Connection CXT */
2474 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2475 qed_cxt_get_proto_cid_start(p_hwfn,
2476 proto),
2477 qed_cxt_get_proto_cid_count(p_hwfn,
2478 proto, &cid));
2479
2480 if (rc)
2481 return rc;
2482
Michal Kalderon9de506a2018-03-05 23:50:46 +02002483	/* Free Task CXT (intentionally RoCE, as the task-id is shared between
2484	 * RoCE and iWARP)
2485 */
2486 proto = PROTOCOLID_ROCE;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002487 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2488 qed_cxt_get_proto_tid_count(p_hwfn, proto));
2489 if (rc)
2490 return rc;
2491
2492 /* Free TSDM CXT */
2493 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2494 qed_cxt_get_srq_count(p_hwfn));
2495
2496 return rc;
2497}
2498
2499int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2500 u32 tid, u8 ctx_type, void **pp_task_ctx)
2501{
2502 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2503 struct qed_ilt_client_cfg *p_cli;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002504 struct qed_tid_seg *p_seg_info;
Arun Easi1e128c82017-02-15 06:28:22 -08002505 struct qed_ilt_cli_blk *p_seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002506 u32 num_tids_per_block;
Arun Easi1e128c82017-02-15 06:28:22 -08002507 u32 tid_size, ilt_idx;
2508 u32 total_lines;
2509 u32 proto, seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002510
2511 /* Verify the personality */
2512 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002513 case QED_PCI_FCOE:
2514 proto = PROTOCOLID_FCOE;
2515 seg = QED_CXT_FCOE_TID_SEG;
2516 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002517 case QED_PCI_ISCSI:
2518 proto = PROTOCOLID_ISCSI;
2519 seg = QED_CXT_ISCSI_TID_SEG;
2520 break;
2521 default:
2522 return -EINVAL;
2523 }
2524
2525 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2526 if (!p_cli->active)
2527 return -EINVAL;
2528
2529 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2530
2531 if (ctx_type == QED_CTX_WORKING_MEM) {
2532 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2533 } else if (ctx_type == QED_CTX_FL_MEM) {
2534 if (!p_seg_info->has_fl_mem)
2535 return -EINVAL;
2536 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2537 } else {
2538 return -EINVAL;
2539 }
2540 total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2541 tid_size = p_mngr->task_type_size[p_seg_info->type];
2542 num_tids_per_block = p_seg->real_size_in_page / tid_size;
2543
2544 if (total_lines < tid / num_tids_per_block)
2545 return -EINVAL;
2546
2547 ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2548 p_mngr->pf_start_line;
2549 *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2550 (tid % num_tids_per_block) * tid_size;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002551
2552 return 0;
2553}
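/* Illustrative only (hypothetical sizes): with real_size_in_page = 65536 and
 * a 128-byte task context, num_tids_per_block = 512, so tid 1300 is located
 * on shadow line (1300 / 512) + p_seg->start_line - pf_start_line at byte
 * offset (1300 % 512) * 128 = 276 * 128 = 35328 within that page.
 */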