/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS	256

/* Timers constants */
#define TM_SHIFT		7
#define TM_ALIGN		BIT(TM_SHIFT)
#define TM_ELEM_SIZE		4

#define ILT_DEFAULT_HW_P_SIZE	4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4

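/* Illustrative sketch (not part of the upstream file): composing a 64-bit
 * ILT entry from the mask/shift pairs above with the generic SET_FIELD()
 * helper from the qed headers. The physical address is stored in 4K units
 * (hence the ">> 12"), and the entry is later written to hardware as
 * ILT_ENTRY_IN_REGS (2) registers of ILT_REG_SIZE_IN_BYTES (4) bytes each.
 * The function name is hypothetical.
 */
static inline u64 example_ilt_entry(dma_addr_t p_phys)
{
	u64 ilt_hw_entry = 0;

	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (u64)p_phys >> 12);

	return ilt_hw_entry;
}
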
/* connection context union */
union conn_context {
	struct e4_core_conn_context core_ctx;
	struct e4_eth_conn_context eth_ctx;
	struct e4_iscsi_conn_context iscsi_ctx;
	struct e4_fcoe_conn_context fcoe_ctx;
	struct e4_roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
	struct e4_iscsi_task_context iscsi_ctx;
	struct e4_fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
	struct e4_rdma_task_context roce_ctx;
};

struct src_ent {
	u8 opaque[56];
	u64 next;
};

#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

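/* Note: ALIGNED_TYPE_SIZE() (defined in qed.h) pads the raw union size up
 * to the device cache-line granularity, so CONN_CXT_SIZE() and
 * TYPE0_TASK_CXT_SIZE() yield the per-element stride used for the ILT
 * sizing below rather than a plain sizeof().
 */
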
/* PF per protocol configuration object */
#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct qed_tid_seg {
	u32 count;
	u8 type;
	bool has_fl_mem;
};

struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cids_per_vf;
	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)

enum ilt_clients {
	ILT_CLI_CDUC,
	ILT_CLI_CDUT,
	ILT_CLI_QM,
	ILT_CLI_TM,
	ILT_CLI_SRC,
	ILT_CLI_TSDM,
	ILT_CLI_MAX
};

struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
	u32 dynamic_line_cnt;
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};

/* Per Path -
 *  ILT shadow table
 *  Protocol acquired CID lists
 *  PF start line in ILT
 */
struct qed_dma_mem {
	dma_addr_t p_phys;
	void *p_virt;
	size_t size;
};

struct qed_cid_acquired_map {
	u32 start_cid;
	u32 max_count;
	unsigned long *cid_map;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg clients[ILT_CLI_MAX];

	/* Task type sizes */
	u32 task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;

	/* Acquired CIDs */
	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];

	struct qed_cid_acquired_map
	    acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];

	/* ILT shadow table */
	struct qed_dma_mem *ilt_shadow;
	u32 pf_start_line;

	/* Mutex for a dynamic ILT allocation */
	struct mutex mutex;

	/* SRC T2 */
	struct qed_dma_mem *t2;
	u32 t2_num_pages;
	u64 first_free;
	u64 last_free;

	/* total number of SRQs for this hwfn */
	u32 srq_count;

	/* Maximal number of L2 steering filters */
	u32 arfs_count;
};

static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}

	/* Add the L2 steering filters in addition */
	iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
	u32 pf_cids;
	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
	u32 pf_tids_total;
	u32 per_vf_cids;
	u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_cxt_mngr *p_mngr,
			    struct qed_tm_iids *iids)
{
	bool tm_vf_required = false;
	bool tm_required = false;
	int i, j;

	/* Timers is a special case -> we don't count how many cids require
	 * timers but what's the max cid that will be used by the timer block.
	 * Therefore we traverse in reverse order, and once we hit a protocol
	 * that requires the timers memory, we sum all the protocols up
	 * to that one.
	 */
	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

		if (tm_cid_proto(i) || tm_required) {
			if (p_cfg->cid_count)
				tm_required = true;

			iids->pf_cids += p_cfg->cid_count;
		}

		if (tm_cid_proto(i) || tm_vf_required) {
			if (p_cfg->cids_per_vf)
				tm_vf_required = true;

			iids->per_vf_cids += p_cfg->cids_per_vf;
		}

		if (tm_tid_proto(i)) {
			struct qed_tid_seg *segs = p_cfg->tid_seg;

			/* for each segment there is at most one
			 * protocol for which count is not 0.
			 */
			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
				iids->pf_tids[j] += segs[j].count;

			/* The last array element is for the VFs. As for PF
			 * segments there can be only one protocol for
			 * which this value is not 0.
			 */
			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
		}
	}

	iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
	iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
	iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
		iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
		iids->pf_tids_total += iids->pf_tids[j];
	}
}
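
/* Illustrative example of the reverse traversal above (not from the
 * driver): if the highest-numbered protocol with timers has a non-zero
 * cid_count, the cid counts of all lower-numbered protocols are summed as
 * well, even those that don't use timers, because the timer block is
 * dimensioned by the maximal cid value rather than by the number of
 * timer-using cids.
 */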

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As for PF
		 * segments there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids += vf_cids * p_mngr->vf_count;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
						u32 seg)
{
	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++)
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];

	return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
}

static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	return p_mgr->srq_count;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

		p_conn->cid_count = roundup(p_conn->cid_count, align);
	}
}

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type proto,
					u8 seg,
					u8 seg_type, u32 count, bool has_fl)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size, u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}

static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
					enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct qed_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
		    (u32) CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}
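
/* Worked example for qed_ilt_get_dynamic_line_cnt() (illustrative numbers):
 * with the default 64K ILT page (ILT_DEFAULT_HW_P_SIZE = 4) and an assumed
 * connection context size of 512B, cxts_per_p = 128, so a RoCE cid_count
 * of 4096 yields 32 CDUC lines that are skipped here and only back-filled
 * later by the dynamic ILT allocation path.
 */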

static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
						  *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_src_iids src_iids;
	struct qed_qm_iids qm_iids;
	struct qed_tm_iids tm_iids;
	struct qed_tid_seg *p_seg;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));
	memset(&src_iids, 0, sizeof(src_iids));
	memset(&tm_iids, 0, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
							       ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk =
		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1. The total-size in the corresponding FL block of
			 *    the ILT client is set to 0 - no ILT lines are
			 *    provisioned and no ILT memory allocated.
			 *
			 * 2. The start-line of said block is set to the
			 *    start line of the matching working memory
			 *    block in the ILT client. This is later used to
			 *    configure the CDU segment offset registers and
			 *    results in FL commands for TIDs of this
			 *    segment behaving as regular load commands
			 *    (loading TIDs from the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk =
		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			qed_ilt_cli_blk_fill(p_cli, p_blk,
					     curr_line, total, task_size);
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
		p_cli->vf_total_lines = curr_line -
		    p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);

			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids,
		   qm_iids.vf_cids,
		   qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	qed_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = max_t(u32, total,
				      SRC_MIN_NUM_ELEMS);

		total = roundup_pow_of_two(local_max);

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * sizeof(struct src_ent),
				     sizeof(struct src_ent));

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++)
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_TM);
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TSDM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT))
		return -EINVAL;

	return 0;
}

u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct qed_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct qed_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, QED_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}

static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 i;

	if (!p_mngr->t2)
		return;

	for (i = 0; i < p_mngr->t2_num_pages; i++)
		if (p_mngr->t2[i].p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_mngr->t2[i].size,
					  p_mngr->t2[i].p_virt,
					  p_mngr->t2[i].p_phys);

	kfree(p_mngr->t2);
	p_mngr->t2 = NULL;
}

static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct qed_ilt_client_cfg *p_src;
	struct qed_src_iids src_iids;
	struct qed_dma_mem *p_t2;
	int rc;

	memset(&src_iids, 0, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher, leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return 0;

	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
			     GFP_KERNEL);
	if (!p_mngr->t2) {
		rc = -ENOMEM;
		goto t2_fail;
	}

	/* allocate t2 pages */
	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 size = min_t(u32, total_size, psz);
		void **p_virt = &p_mngr->t2[i].p_virt;

		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					     size,
					     &p_mngr->t2[i].p_phys, GFP_KERNEL);
		if (!p_mngr->t2[i].p_virt) {
			rc = -ENOMEM;
			goto t2_fail;
		}
		memset(*p_virt, 0, size);
		p_mngr->t2[i].size = size;
		total_size -= size;
	}

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);

	p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;

	p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
	p_mngr->last_free = (u64) p_t2->p_phys +
	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 ent_num = min_t(u32,
				    ent_per_page,
				    conn_num);
		struct src_ent *entries = p_mngr->t2[i].p_virt;
		u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
			entries[j].next = cpu_to_be64(val);
		}

		if (i < p_mngr->t2_num_pages - 1)
			val = (u64) p_mngr->t2[i + 1].p_phys;
		else
			val = 0;
		entries[j].next = cpu_to_be64(val);

		conn_num -= ent_num;
	}

	return 0;

t2_fail:
	qed_cxt_src_t2_free(p_hwfn);
	return rc;
}
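
/* Resulting T2 layout (illustrative): each page holds
 * psz / sizeof(struct src_ent) entries, every entry's 'next' field carries
 * the big-endian physical address of its successor, the last entry of a
 * page chains to the first entry of the next page, and the final entry
 * holds 0 (NULL) to terminate the searcher's free list.
 */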

#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < ILT_CLI_MAX; pos++)	\
		if (!clients[pos].active) {	\
			continue;		\
		} else				\

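/* Usage sketch (illustrative): the odd if/else tail above makes the
 * caller's statement the 'else' branch while inactive clients are skipped,
 * so the macro can be used like a plain for loop:
 *
 *	for_each_ilt_valid_client(i, clients)
 *		size += clients[i].last.val - clients[i].first.val + 1;
 */
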
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
		size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

	return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->p_virt,
					  p_dma->p_phys);
		p_dma->p_virt = NULL;
	}
	kfree(p_mngr->ilt_shadow);
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return 0;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left, p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    size, &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;
		memset(p_virt, 0, size);

		ilt_shadow[line].p_phys = p_phys;
		ilt_shadow[line].p_virt = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			kfree(p_mngr->acquired_vf[type][vf].cid_map);
			p_mngr->acquired_vf[type][vf].max_count = 0;
			p_mngr->acquired_vf[type][vf].start_cid = 0;
		}
	}
}

static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
			 u32 type,
			 u32 cid_start,
			 u32 cid_count, struct qed_cid_acquired_map *p_map)
{
	u32 size;

	if (!cid_count)
		return 0;

	size = DIV_ROUND_UP(cid_count,
			    sizeof(unsigned long) * BITS_PER_BYTE) *
	       sizeof(unsigned long);
	p_map->cid_map = kzalloc(size, GFP_KERNEL);
	if (!p_map->cid_map)
		return -ENOMEM;

	p_map->max_count = cid_count;
	p_map->start_cid = cid_start;

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Type %08x start: %08x count %08x\n",
		   type, p_map->start_cid, p_map->max_count);

	return 0;
}

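/* Usage sketch (illustrative, mirroring how the acquired-cid bitmaps are
 * consumed elsewhere in qed): a free relative cid is found and claimed
 * with the standard bitmap helpers, then offset by start_cid:
 *
 *	rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
 *	if (rel_cid < p_map->max_count) {
 *		__set_bit(rel_cid, p_map->cid_map);
 *		cid = rel_cid + p_map->start_cid;
 *	}
 */
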
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0, vf_start_cid = 0;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
		struct qed_cid_acquired_map *p_map;

		/* Handle PF maps */
		p_map = &p_mngr->acquired[type];
		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
					     p_cfg->cid_count, p_map))
			goto cid_map_fail;

		/* Handle VF maps */
		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			if (qed_cid_map_alloc_single(p_hwfn, type,
						     vf_start_cid,
						     p_cfg->cids_per_vf, p_map))
				goto cid_map_fail;
		}

		start_cid += p_cfg->cid_count;
		vf_start_cid += p_cfg->cids_per_vf;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr)
		return -ENOMEM;

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

	/* default ILT page size for all clients is 64K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->cdev->p_iov_info)
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;

	/* Initialize the dynamic ILT allocation mutex */
	mutex_init(&p_mngr->mutex);

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate the T2 table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_cxt_src_t2_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	struct qed_conn_type_cfg *p_cfg;
	int type;
	u32 len;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 vf;

		p_cfg = &p_mngr->conn_cfg[type];
		if (p_cfg->cid_count) {
			p_map = &p_mngr->acquired[type];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}

		if (!p_cfg->cids_per_vf)
			continue;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}
	}
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-0 tasks configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-1 tasks configuration */
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}
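
/* Worked example for qed_cdu_init_common() (illustrative numbers): with a
 * 64K ILT page and an assumed connection context size of 320B,
 * elems_per_page = 65536 / 320 = 204 and block_waste = 65536 - 204 * 320 =
 * 256, i.e. the CDU is told both how many contexts fit in a page and how
 * many bytes at the page's tail are unused.
 */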
1437
1438/* CDU PF */
1439#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1440#define CDU_SEG_REG_TYPE_MASK 0x1
1441#define CDU_SEG_REG_OFFSET_SHIFT 0
1442#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1443
1444static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1445{
1446 struct qed_ilt_client_cfg *p_cli;
1447 struct qed_tid_seg *p_seg;
1448 u32 cdu_seg_params, offset;
1449 int i;
1450
1451 static const u32 rt_type_offset_arr[] = {
1452 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1453 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1454 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1455 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1456 };
1457
1458 static const u32 rt_type_offset_fl_arr[] = {
1459 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1460 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1461 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1462 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1463 };
1464
1465 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1466
1467 /* There are initializations only for CDUT during pf Phase */
1468 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1469 /* Segment 0 */
1470 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1471 if (!p_seg)
1472 continue;
1473
1474 /* Note: start_line is already adjusted for the CDU
1475 * segment register granularity, so we just need to
1476 * divide. Adjustment is implicit as we assume ILT
1477 * Page size is larger than 32K!
1478 */
1479 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1480 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1481 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1482
1483 cdu_seg_params = 0;
1484 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1485 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1486 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1487
1488 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1489 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1490 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1491
1492 cdu_seg_params = 0;
1493 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1494 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1495 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1496 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001497}
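/* A sketch of the offset computation above, with hypothetical numbers:
 * if a segment block starts 3 ILT lines past the client's first line
 * and pages are 64KB, the byte distance is 3 * 65536 == 196608, which
 * is then divided by CDUT_SEG_ALIGNMET_IN_BYTES to express it in the
 * alignment units the CDU segment registers expect.
 */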
1498
Tomer Tayarda090912017-12-27 19:30:07 +02001499void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
1500 struct qed_ptt *p_ptt, bool is_pf_loading)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001501{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001502 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
Tomer Tayarda090912017-12-27 19:30:07 +02001503 struct qed_qm_pf_rt_init_params params;
1504 struct qed_mcp_link_state *p_link;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001505 struct qed_qm_iids iids;
1506
1507 memset(&iids, 0, sizeof(iids));
1508 qed_cxt_qm_iids(p_hwfn, &iids);
1509
Tomer Tayarda090912017-12-27 19:30:07 +02001510 p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
1511
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001512 memset(&params, 0, sizeof(params));
1513 params.port_id = p_hwfn->port_id;
1514 params.pf_id = p_hwfn->rel_pf_id;
1515 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
Tomer Tayarda090912017-12-27 19:30:07 +02001516 params.is_pf_loading = is_pf_loading;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001517 params.num_pf_cids = iids.cids;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001518 params.num_vf_cids = iids.vf_cids;
Mintz, Yuvalc9f0523b2017-05-09 15:07:49 +03001519 params.num_tids = iids.tids;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001520 params.start_pq = qm_info->start_pq;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001521 params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1522 params.num_vf_pqs = qm_info->num_vf_pqs;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001523 params.start_vport = qm_info->start_vport;
1524 params.num_vports = qm_info->num_vports;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001525 params.pf_wfq = qm_info->pf_wfq;
1526 params.pf_rl = qm_info->pf_rl;
Tomer Tayarda090912017-12-27 19:30:07 +02001527 params.link_speed = p_link->speed;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001528 params.pq_params = qm_info->qm_pq_params;
1529 params.vport_params = qm_info->qm_vport_params;
1530
Rahul Verma15582962017-04-06 15:58:29 +03001531 qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001532}
1533
1534/* CM PF */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001535void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001536{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001537 /* XCM pure-LB queue */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001538 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1539 qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001540}
1541
1542/* DQ PF */
1543static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1544{
1545 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001546 u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001547
1548 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1549 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1550
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001551 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1552 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1553
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001554 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1555 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1556
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001557 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1558 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1559
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001560 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1561 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1562
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001563 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1564 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1565
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001566 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1567 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1568
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001569 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1570 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1571
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001572 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1573 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1574
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001575 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1576 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1577
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001578 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1579 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001580
1581 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1582 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1583
 1584	/* Connection types 6 & 7 are not in use, yet they must still be
 1585	 * configured with the highest possible connection count. If left
 1586	 * unconfigured the hardware defaults are used, and with a large
 1587	 * number of CIDs a bug may occur if those defaults are smaller
 1588	 * than dq_pf_max_cid / dq_vf_max_cid.
 1589	 */
1590 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1591 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1592
1593 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1594 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001595}
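/* Illustrative arithmetic for the ranges above, with a hypothetical
 * count: DQ_RANGE_SHIFT == 4 means each programmed unit covers 16
 * CIDs, so a connection type with cid_count == 4096 contributes
 * 4096 >> 4 == 256 to the running dq_pf_max_cid; each
 * DORQ_REG_PF_MAX_ICID_N register then holds the cumulative upper
 * bound after type N.
 */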
1596
1597static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1598{
1599 struct qed_ilt_client_cfg *ilt_clients;
1600 int i;
1601
1602 ilt_clients = p_hwfn->p_cxt_mngr->clients;
1603 for_each_ilt_valid_client(i, ilt_clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001604 STORE_RT_REG(p_hwfn,
1605 ilt_clients[i].first.reg,
1606 ilt_clients[i].first.val);
1607 STORE_RT_REG(p_hwfn,
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001608 ilt_clients[i].last.reg, ilt_clients[i].last.val);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001609 STORE_RT_REG(p_hwfn,
1610 ilt_clients[i].p_size.reg,
1611 ilt_clients[i].p_size.val);
1612 }
1613}
1614
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001615static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1616{
1617 struct qed_ilt_client_cfg *p_cli;
1618 u32 blk_factor;
1619
 1620	/* For simplicity we set the 'block' to be an ILT page */
1621 if (p_hwfn->cdev->p_iov_info) {
1622 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1623
1624 STORE_RT_REG(p_hwfn,
1625 PSWRQ2_REG_VF_BASE_RT_OFFSET,
1626 p_iov->first_vf_in_pf);
1627 STORE_RT_REG(p_hwfn,
1628 PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1629 p_iov->first_vf_in_pf + p_iov->total_vfs);
1630 }
1631
1632 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1633 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1634 if (p_cli->active) {
1635 STORE_RT_REG(p_hwfn,
1636 PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1637 blk_factor);
1638 STORE_RT_REG(p_hwfn,
1639 PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1640 p_cli->pf_total_lines);
1641 STORE_RT_REG(p_hwfn,
1642 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1643 p_cli->vf_total_lines);
1644 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001645
1646 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1647 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1648 if (p_cli->active) {
1649 STORE_RT_REG(p_hwfn,
1650 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1651 blk_factor);
1652 STORE_RT_REG(p_hwfn,
1653 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1654 p_cli->pf_total_lines);
1655 STORE_RT_REG(p_hwfn,
1656 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1657 p_cli->vf_total_lines);
1658 }
1659
1660 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1661 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1662 if (p_cli->active) {
1663 STORE_RT_REG(p_hwfn,
1664 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1665 STORE_RT_REG(p_hwfn,
1666 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1667 p_cli->pf_total_lines);
1668 STORE_RT_REG(p_hwfn,
1669 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1670 p_cli->vf_total_lines);
1671 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001672}
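/* blk_factor example: with the default 64KB ILT page (p_size.val == 4),
 * blk_factor = ilog2(65536 >> 10) = ilog2(64) = 6, i.e. each 'block'
 * is 2^6 KB, exactly one ILT page, matching the simplification noted
 * at the top of this function.
 */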
1673
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001674/* ILT (PSWRQ2) PF */
1675static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1676{
1677 struct qed_ilt_client_cfg *clients;
1678 struct qed_cxt_mngr *p_mngr;
1679 struct qed_dma_mem *p_shdw;
1680 u32 line, rt_offst, i;
1681
1682 qed_ilt_bounds_init(p_hwfn);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001683 qed_ilt_vf_bounds_init(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001684
1685 p_mngr = p_hwfn->p_cxt_mngr;
1686 p_shdw = p_mngr->ilt_shadow;
1687 clients = p_hwfn->p_cxt_mngr->clients;
1688
1689 for_each_ilt_valid_client(i, clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001690		/* Client's first val and the RT array are absolute; ILT shadow
 1691		 * lines are relative.
 1692		 */
1693 line = clients[i].first.val - p_mngr->pf_start_line;
1694 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1695 clients[i].first.val * ILT_ENTRY_IN_REGS;
1696
1697 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1698 line++, rt_offst += ILT_ENTRY_IN_REGS) {
1699 u64 ilt_hw_entry = 0;
1700
 1701			/* p_virt may be NULL in case of dynamic
 1702			 * allocation
 1703			 */
1704 if (p_shdw[line].p_virt) {
1705 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1706 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1707 (p_shdw[line].p_phys >> 12));
1708
1709 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1710 "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1711 rt_offst, line, i,
1712 (u64)(p_shdw[line].p_phys >> 12));
1713 }
1714
1715 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1716 }
1717 }
1718}
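/* An illustrative sketch, not part of the driver, of how a single ILT
 * entry is encoded in the loop above: the physical address is kept in
 * 4KB units, hence the shift by 12 before it enters the PHY_ADDR field.
 */
static u64 example_ilt_entry_encode(dma_addr_t p_phys)
{
	u64 ilt_hw_entry = 0;

	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (u64)p_phys >> 12);

	return ilt_hw_entry;
}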
1719
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001720/* SRC (Searcher) PF */
1721static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1722{
1723 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1724 u32 rounded_conn_num, conn_num, conn_max;
1725 struct qed_src_iids src_iids;
1726
1727 memset(&src_iids, 0, sizeof(src_iids));
1728 qed_cxt_src_iids(p_mngr, &src_iids);
1729 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1730 if (!conn_num)
1731 return;
1732
1733 conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1734 rounded_conn_num = roundup_pow_of_two(conn_max);
1735
1736 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1737 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1738 ilog2(rounded_conn_num));
1739
1740 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1741 p_hwfn->p_cxt_mngr->first_free);
1742 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1743 p_hwfn->p_cxt_mngr->last_free);
1744}
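/* A worked example with hypothetical counts: 100 PF cids plus 10 VFs
 * of 20 cids each gives conn_num = 300; conn_max = max(300, 256) = 300
 * (SRC_MIN_NUM_ELEMS is 256), rounded_conn_num = roundup_pow_of_two(300)
 * = 512, so the searcher is programmed with ilog2(512) = 9 hash bits.
 */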
1745
1746/* Timers PF */
1747#define TM_CFG_NUM_IDS_SHIFT 0
1748#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
1749#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
1750#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
1751#define TM_CFG_PARENT_PF_SHIFT 25
1752#define TM_CFG_PARENT_PF_MASK 0x7ULL
1753
1754#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
1755#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
1756
1757#define TM_CFG_TID_OFFSET_SHIFT 30
1758#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
1759#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
1760#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
1761
1762static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1763{
1764 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1765 u32 active_seg_mask = 0, tm_offset, rt_reg;
1766 struct qed_tm_iids tm_iids;
1767 u64 cfg_word;
1768 u8 i;
1769
1770 memset(&tm_iids, 0, sizeof(tm_iids));
Michal Kalderon44531ba2017-04-03 12:21:10 +03001771 qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001772
1773 /* @@@TBD No pre-scan for now */
1774
1775 /* Note: We assume consecutive VFs for a PF */
1776 for (i = 0; i < p_mngr->vf_count; i++) {
1777 cfg_word = 0;
1778 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1779 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1780 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1781 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1782 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1783 (sizeof(cfg_word) / sizeof(u32)) *
1784 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1785 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1786 }
1787
1788 cfg_word = 0;
1789 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1790 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1791 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
1792 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1793
1794 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1795 (sizeof(cfg_word) / sizeof(u32)) *
1796 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1797 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1798
 1799	/* enable scan */
1800 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1801 tm_iids.pf_cids ? 0x1 : 0x0);
1802
1803 /* @@@TBD how to enable the scan for the VFs */
1804
1805 tm_offset = tm_iids.per_vf_cids;
1806
1807 /* Note: We assume consecutive VFs for a PF */
1808 for (i = 0; i < p_mngr->vf_count; i++) {
1809 cfg_word = 0;
1810 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1811 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1812 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1813 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1814 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1815
1816 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1817 (sizeof(cfg_word) / sizeof(u32)) *
1818 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1819
1820 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1821 }
1822
1823 tm_offset = tm_iids.pf_cids;
1824 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1825 cfg_word = 0;
1826 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1827 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1828 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1829 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1830 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1831
1832 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1833 (sizeof(cfg_word) / sizeof(u32)) *
1834 (NUM_OF_VFS(p_hwfn->cdev) +
1835 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1836
1837 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001838 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001839
1840 tm_offset += tm_iids.pf_tids[i];
1841 }
1842
Kalderon, Michalc851a9d2017-07-02 10:29:21 +03001843 if (QED_IS_RDMA_PERSONALITY(p_hwfn))
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001844 active_seg_mask = 0;
1845
1846 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1847
1848 /* @@@TBD how to enable the scan for the VFs */
1849}
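/* A packing sketch for the 64-bit cfg_word above, with hypothetical
 * values per_vf_cids == 64 and rel_pf_id == 2:
 *
 *	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, 64);	bits 0..15
 *	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 2);	bits 25..27
 *
 * Each entry spans sizeof(cfg_word) / sizeof(u32) == 2 runtime
 * registers, which is the stride used in the rt_reg computations.
 */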
1850
Arun Easi1e128c82017-02-15 06:28:22 -08001851static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1852{
1853 if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1854 p_hwfn->pf_params.fcoe_pf_params.is_target)
1855 STORE_RT_REG(p_hwfn,
1856 PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1857}
1858
1859static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1860{
1861 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1862 struct qed_conn_type_cfg *p_fcoe;
1863 struct qed_tid_seg *p_tid;
1864
1865 p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1866
1867 /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1868 if (!p_fcoe->cid_count)
1869 return;
1870
1871 p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1872 if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1873 STORE_RT_REG_AGG(p_hwfn,
1874 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1875 p_tid->count);
1876 } else {
1877 STORE_RT_REG_AGG(p_hwfn,
1878 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1879 p_tid->count);
1880 }
1881}
1882
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001883void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1884{
1885 qed_cdu_init_common(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001886 qed_prs_init_common(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001887}
1888
Rahul Verma15582962017-04-06 15:58:29 +03001889void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001890{
Tomer Tayarda090912017-12-27 19:30:07 +02001891 qed_qm_init_pf(p_hwfn, p_ptt, true);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001892 qed_cm_init_pf(p_hwfn);
1893 qed_dq_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001894 qed_cdu_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001895 qed_ilt_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001896 qed_src_init_pf(p_hwfn);
1897 qed_tm_init_pf(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001898 qed_prs_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001899}
1900
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001901int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1902 enum protocol_type type, u32 *p_cid, u8 vfid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001903{
1904 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001905 struct qed_cid_acquired_map *p_map;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001906 u32 rel_cid;
1907
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001908 if (type >= MAX_CONN_TYPES) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001909 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1910 return -EINVAL;
1911 }
1912
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001913 if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
1914 DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
1915 return -EINVAL;
1916 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001917
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001918 /* Determine the right map to take this CID from */
1919 if (vfid == QED_CXT_PF_CID)
1920 p_map = &p_mngr->acquired[type];
1921 else
1922 p_map = &p_mngr->acquired_vf[type][vfid];
1923
1924 if (!p_map->cid_map) {
 1925		DP_NOTICE(p_hwfn, "CID map not allocated for protocol type %d", type);
1926 return -EINVAL;
1927 }
1928
1929 rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
1930
1931 if (rel_cid >= p_map->max_count) {
Yuval Mintz1a635e42016-08-15 10:42:43 +03001932 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001933 return -EINVAL;
1934 }
1935
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001936 __set_bit(rel_cid, p_map->cid_map);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001937
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001938 *p_cid = rel_cid + p_map->start_cid;
1939
1940 DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1941 "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1942 *p_cid, rel_cid, vfid, type);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001943
1944 return 0;
1945}
1946
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001947int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1948 enum protocol_type type, u32 *p_cid)
1949{
1950 return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
1951}
1952
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001953static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001954 u32 cid,
1955 u8 vfid,
1956 enum protocol_type *p_type,
1957 struct qed_cid_acquired_map **pp_map)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001958{
1959 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001960 u32 rel_cid;
1961
1962 /* Iterate over protocols and find matching cid range */
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001963 for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1964 if (vfid == QED_CXT_PF_CID)
1965 *pp_map = &p_mngr->acquired[*p_type];
1966 else
1967 *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001968
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001969 if (!((*pp_map)->cid_map))
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001970 continue;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001971 if (cid >= (*pp_map)->start_cid &&
1972 cid < (*pp_map)->start_cid + (*pp_map)->max_count)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001973 break;
1974 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001975
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001976 if (*p_type == MAX_CONN_TYPES) {
1977 DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
1978 goto fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001979 }
1980
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001981 rel_cid = cid - (*pp_map)->start_cid;
1982 if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
 1983		DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
1984 cid, vfid);
1985 goto fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001986 }
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001987
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001988 return true;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001989fail:
1990 *p_type = MAX_CONN_TYPES;
1991 *pp_map = NULL;
1992 return false;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001993}
1994
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001995void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001996{
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03001997 struct qed_cid_acquired_map *p_map = NULL;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001998 enum protocol_type type;
1999 bool b_acquired;
2000 u32 rel_cid;
2001
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002002	if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
2003 DP_NOTICE(p_hwfn,
2004 "Trying to return incorrect CID belonging to VF %02x\n",
2005 vfid);
2006 return;
2007 }
2008
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002009 /* Test acquired and find matching per-protocol map */
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002010 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
2011 &type, &p_map);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002012
2013 if (!b_acquired)
2014 return;
2015
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002016 rel_cid = cid - p_map->start_cid;
2017 clear_bit(rel_cid, p_map->cid_map);
2018
2019 DP_VERBOSE(p_hwfn, QED_MSG_CXT,
2020 "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
2021 cid, rel_cid, vfid, type);
2022}
2023
2024void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
2025{
2026 _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002027}
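/* An illustrative usage sketch (hypothetical caller, not part of this
 * file): acquiring a PF-owned Ethernet CID and releasing it again.
 */
static int example_cid_roundtrip(struct qed_hwfn *p_hwfn)
{
	u32 cid;
	int rc;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
	if (rc)
		return rc;

	/* ... use the CID, e.g. in a queue-start ramrod ... */

	qed_cxt_release_cid(p_hwfn, cid);

	return 0;
}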
2028
Yuval Mintz1a635e42016-08-15 10:42:43 +03002029int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002030{
2031 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002032 struct qed_cid_acquired_map *p_map = NULL;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002033 u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
2034 enum protocol_type type;
2035 bool b_acquired;
2036
2037 /* Test acquired and find matching per-protocol map */
Mintz, Yuval6bea61d2017-06-04 13:30:59 +03002038 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
2039 QED_CXT_PF_CID, &type, &p_map);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002040
2041 if (!b_acquired)
2042 return -EINVAL;
2043
 2044	/* set the protocol type */
2045 p_info->type = type;
2046
2047 /* compute context virtual pointer */
2048 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
2049
2050 conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
2051 cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
2052 line = p_info->iid / cxts_per_p;
2053
2054 /* Make sure context is allocated (dynamic allocation) */
2055 if (!p_mngr->ilt_shadow[line].p_virt)
2056 return -EINVAL;
2057
2058 p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
2059 p_info->iid % cxts_per_p * conn_cxt_size;
2060
2061 DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
2062 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
2063 p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
2064
2065 return 0;
2066}
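/* Pointer-math example with hypothetical sizes: 64KB ILT pages and a
 * 512-byte connection context give cxts_per_p = 128, so iid 300 maps
 * to shadow line 300 / 128 = 2, at byte offset (300 % 128) * 512 =
 * 22528 within that page.
 */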
2067
Yuval Mintz8c93bea2016-10-13 22:57:03 +03002068static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
Ram Amranif9dc4d12017-04-03 12:21:13 +03002069 struct qed_rdma_pf_params *p_params,
2070 u32 num_tasks)
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002071{
Ram Amranif9dc4d12017-04-03 12:21:13 +03002072 u32 num_cons, num_qps, num_srqs;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002073 enum protocol_type proto;
2074
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002075 num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
2076
Michal Kalderone0a8f9d2017-09-24 12:09:42 +03002077 if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
2078 DP_NOTICE(p_hwfn,
2079 "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
2080 p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
2081 }
2082
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002083 switch (p_hwfn->hw_info.personality) {
Kalderon, Michal5d7dc962017-07-02 10:29:31 +03002084 case QED_PCI_ETH_IWARP:
2085 /* Each QP requires one connection */
2086 num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
2087 proto = PROTOCOLID_IWARP;
2088 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002089 case QED_PCI_ETH_ROCE:
2090 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
2091 num_cons = num_qps * 2; /* each QP requires two connections */
2092 proto = PROTOCOLID_ROCE;
2093 break;
2094 default:
2095 return;
2096 }
2097
2098 if (num_cons && num_tasks) {
2099 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
2100
 2101		/* Deliberately passing ROCE for the task id. This is because
 2102		 * iWARP / RoCE share the task id.
 2103		 */
2104 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
2105 QED_CXT_ROCE_TID_SEG, 1,
2106 num_tasks, false);
2107 qed_cxt_set_srq_count(p_hwfn, num_srqs);
2108 } else {
2109 DP_INFO(p_hwfn->cdev,
2110 "RDMA personality used without setting params!\n");
2111 }
2112}
2113
Ram Amranif9dc4d12017-04-03 12:21:13 +03002114int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002115{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002116 /* Set the number of required CORE connections */
2117 u32 core_cids = 1; /* SPQ */
2118
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002119 if (p_hwfn->using_ll2)
2120 core_cids += 4;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002121 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002122
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002123 switch (p_hwfn->hw_info.personality) {
Kalderon, Michal5d7dc962017-07-02 10:29:31 +03002124 case QED_PCI_ETH_RDMA:
2125 case QED_PCI_ETH_IWARP:
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002126 case QED_PCI_ETH_ROCE:
2127 {
Ram Amranif9dc4d12017-04-03 12:21:13 +03002128 qed_rdma_set_pf_params(p_hwfn,
 2129				       &p_hwfn->pf_params.rdma_pf_params,
 2130				       rdma_tasks);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002132		/* no need for break since RoCE coexists with Ethernet */
2133 }
2134 case QED_PCI_ETH:
2135 {
2136 struct qed_eth_pf_params *p_params =
2137 &p_hwfn->pf_params.eth_pf_params;
2138
Mintz, Yuval08bc8f12017-06-04 13:31:06 +03002139 if (!p_params->num_vf_cons)
2140 p_params->num_vf_cons =
2141 ETH_PF_PARAMS_VF_CONS_DEFAULT;
2142 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2143 p_params->num_cons,
2144 p_params->num_vf_cons);
Chopra, Manishd51e4af2017-04-13 04:54:44 -07002145 p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002146 break;
2147 }
Arun Easi1e128c82017-02-15 06:28:22 -08002148 case QED_PCI_FCOE:
2149 {
2150 struct qed_fcoe_pf_params *p_params;
2151
2152 p_params = &p_hwfn->pf_params.fcoe_pf_params;
2153
2154 if (p_params->num_cons && p_params->num_tasks) {
2155 qed_cxt_set_proto_cid_count(p_hwfn,
2156 PROTOCOLID_FCOE,
2157 p_params->num_cons,
2158 0);
2159
2160 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
2161 QED_CXT_FCOE_TID_SEG, 0,
2162 p_params->num_tasks, true);
2163 } else {
2164 DP_INFO(p_hwfn->cdev,
2165 "Fcoe personality used without setting params!\n");
2166 }
2167 break;
2168 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002169 case QED_PCI_ISCSI:
2170 {
2171 struct qed_iscsi_pf_params *p_params;
2172
2173 p_params = &p_hwfn->pf_params.iscsi_pf_params;
2174
2175 if (p_params->num_cons && p_params->num_tasks) {
2176 qed_cxt_set_proto_cid_count(p_hwfn,
2177 PROTOCOLID_ISCSI,
2178 p_params->num_cons,
2179 0);
2180
2181 qed_cxt_set_proto_tid_count(p_hwfn,
2182 PROTOCOLID_ISCSI,
2183 QED_CXT_ISCSI_TID_SEG,
2184 0,
2185 p_params->num_tasks,
2186 true);
2187 } else {
2188 DP_INFO(p_hwfn->cdev,
2189 "Iscsi personality used without setting params!\n");
2190 }
2191 break;
2192 }
2193 default:
2194 return -EINVAL;
2195 }
2196
2197 return 0;
2198}
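/* An illustrative caller sketch with hypothetical values: a storage PF
 * would populate its pf_params before this function runs, e.g.
 *
 *	p_hwfn->pf_params.iscsi_pf_params.num_cons = 4096;
 *	p_hwfn->pf_params.iscsi_pf_params.num_tasks = 2048;
 *	rc = qed_cxt_set_pf_params(p_hwfn, 0);
 *
 * Leaving the counts at zero takes the "without setting params"
 * DP_INFO paths above.
 */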
2199
2200int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2201 struct qed_tid_mem *p_info)
2202{
2203 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2204 u32 proto, seg, total_lines, i, shadow_line;
2205 struct qed_ilt_client_cfg *p_cli;
2206 struct qed_ilt_cli_blk *p_fl_seg;
2207 struct qed_tid_seg *p_seg_info;
2208
2209 /* Verify the personality */
2210 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002211 case QED_PCI_FCOE:
2212 proto = PROTOCOLID_FCOE;
2213 seg = QED_CXT_FCOE_TID_SEG;
2214 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002215 case QED_PCI_ISCSI:
2216 proto = PROTOCOLID_ISCSI;
2217 seg = QED_CXT_ISCSI_TID_SEG;
2218 break;
2219 default:
2220 return -EINVAL;
2221 }
2222
2223 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2224 if (!p_cli->active)
2225 return -EINVAL;
2226
2227 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2228 if (!p_seg_info->has_fl_mem)
2229 return -EINVAL;
2230
2231 p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2232 total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2233 p_fl_seg->real_size_in_page);
2234
2235 for (i = 0; i < total_lines; i++) {
2236 shadow_line = i + p_fl_seg->start_line -
2237 p_hwfn->p_cxt_mngr->pf_start_line;
2238 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
2239 }
2240 p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2241 p_fl_seg->real_size_in_page;
2242 p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2243 p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2244 p_info->tid_size;
2245
2246 return 0;
2247}
2248
 2249/* This function is very RoCE-oriented; if another protocol wants this
 2250 * feature in the future, it will need to be made more generic.
 2251 */
2252int
2253qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2254 enum qed_cxt_elem_type elem_type, u32 iid)
2255{
2256 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2257 struct qed_ilt_client_cfg *p_cli;
2258 struct qed_ilt_cli_blk *p_blk;
2259 struct qed_ptt *p_ptt;
2260 dma_addr_t p_phys;
2261 u64 ilt_hw_entry;
2262 void *p_virt;
2263 int rc = 0;
2264
2265 switch (elem_type) {
2266 case QED_ELEM_CXT:
2267 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2268 elem_size = CONN_CXT_SIZE(p_hwfn);
2269 p_blk = &p_cli->pf_blks[CDUC_BLK];
2270 break;
2271 case QED_ELEM_SRQ:
2272 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2273 elem_size = SRQ_CXT_SIZE;
2274 p_blk = &p_cli->pf_blks[SRQ_BLK];
2275 break;
2276 case QED_ELEM_TASK:
2277 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2278 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2279 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2280 break;
2281 default:
 2282		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2283 return -EINVAL;
2284 }
2285
2286 /* Calculate line in ilt */
2287 hw_p_size = p_cli->p_size.val;
2288 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2289 line = p_blk->start_line + (iid / elems_per_p);
2290 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2291
2292 /* If line is already allocated, do nothing, otherwise allocate it and
2293 * write it to the PSWRQ2 registers.
2294 * This section can be run in parallel from different contexts and thus
2295 * a mutex protection is needed.
2296 */
2297
2298 mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2299
2300 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2301 goto out0;
2302
2303 p_ptt = qed_ptt_acquire(p_hwfn);
2304 if (!p_ptt) {
2305 DP_NOTICE(p_hwfn,
2306 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2307 rc = -EBUSY;
2308 goto out0;
2309 }
2310
2311 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2312 p_blk->real_size_in_page,
2313 &p_phys, GFP_KERNEL);
2314 if (!p_virt) {
2315 rc = -ENOMEM;
2316 goto out1;
2317 }
2318 memset(p_virt, 0, p_blk->real_size_in_page);
2319
2320 /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2321 * to compensate for a HW bug, but it is configured even if DIF is not
2322 * enabled. This is harmless and allows us to avoid a dedicated API. We
2323 * configure the field for all of the contexts on the newly allocated
2324 * page.
2325 */
2326 if (elem_type == QED_ELEM_TASK) {
2327 u32 elem_i;
2328 u8 *elem_start = (u8 *)p_virt;
2329 union type1_task_context *elem;
2330
2331 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2332 elem = (union type1_task_context *)elem_start;
2333 SET_FIELD(elem->roce_ctx.tdif_context.flags1,
Tomer Tayara2e76992017-12-27 19:30:05 +02002334 TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002335 elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2336 }
2337 }
2338
2339 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2340 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2341 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2342 p_blk->real_size_in_page;
2343
2344 /* compute absolute offset */
2345 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2346 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2347
2348 ilt_hw_entry = 0;
2349 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2350 SET_FIELD(ilt_hw_entry,
2351 ILT_ENTRY_PHY_ADDR,
2352 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2353
2354 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2355 qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2356 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2357
2358 if (elem_type == QED_ELEM_CXT) {
2359 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2360 elems_per_p;
2361
2362 /* Update the relevant register in the parser */
2363 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2364 last_cid_allocated - 1);
2365
2366 if (!p_hwfn->b_rdma_enabled_in_prs) {
Kalderon, Michalc851a9d2017-07-02 10:29:21 +03002367 /* Enable RDMA search */
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002368 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2369 p_hwfn->b_rdma_enabled_in_prs = true;
2370 }
2371 }
2372
2373out1:
2374 qed_ptt_release(p_hwfn, p_ptt);
2375out0:
2376 mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2377
2378 return rc;
2379}
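/* Usage note (a sketch, not a statement about every caller): a RoCE
 * flow would typically call qed_cxt_dynamic_ilt_alloc(p_hwfn,
 * QED_ELEM_CXT, cid) before posting the ramrod that uses 'cid', so the
 * backing ILT page is certain to exist; qed_cxt_free_ilt_range() below
 * tears such mappings down in bulk.
 */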
2380
 2381/* This function is very RoCE-oriented; if another protocol wants this
 2382 * feature in the future, it will need to be made more generic.
 2383 */
2384static int
2385qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2386 enum qed_cxt_elem_type elem_type,
2387 u32 start_iid, u32 count)
2388{
2389 u32 start_line, end_line, shadow_start_line, shadow_end_line;
2390 u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2391 struct qed_ilt_client_cfg *p_cli;
2392 struct qed_ilt_cli_blk *p_blk;
2393 u32 end_iid = start_iid + count;
2394 struct qed_ptt *p_ptt;
2395 u64 ilt_hw_entry = 0;
2396 u32 i;
2397
2398 switch (elem_type) {
2399 case QED_ELEM_CXT:
2400 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2401 elem_size = CONN_CXT_SIZE(p_hwfn);
2402 p_blk = &p_cli->pf_blks[CDUC_BLK];
2403 break;
2404 case QED_ELEM_SRQ:
2405 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2406 elem_size = SRQ_CXT_SIZE;
2407 p_blk = &p_cli->pf_blks[SRQ_BLK];
2408 break;
2409 case QED_ELEM_TASK:
2410 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2411 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2412 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2413 break;
2414 default:
 2415		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2416 return -EINVAL;
2417 }
2418
2419 /* Calculate line in ilt */
2420 hw_p_size = p_cli->p_size.val;
2421 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2422 start_line = p_blk->start_line + (start_iid / elems_per_p);
2423 end_line = p_blk->start_line + (end_iid / elems_per_p);
2424 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2425 end_line--;
2426
2427 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2428 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2429
2430 p_ptt = qed_ptt_acquire(p_hwfn);
2431 if (!p_ptt) {
2432 DP_NOTICE(p_hwfn,
2433 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2434 return -EBUSY;
2435 }
2436
2437 for (i = shadow_start_line; i < shadow_end_line; i++) {
2438 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2439 continue;
2440
2441 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2442 p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2443 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2444 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2445
2446 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2447 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2448 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2449
2450 /* compute absolute offset */
2451 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2452 ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2453 ILT_ENTRY_IN_REGS);
2454
2455 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2456 * wide-bus.
2457 */
2458 qed_dmae_host2grc(p_hwfn, p_ptt,
2459 (u64) (uintptr_t) &ilt_hw_entry,
2460 reg_offset,
2461 sizeof(ilt_hw_entry) / sizeof(u32),
2462 0);
2463 }
2464
2465 qed_ptt_release(p_hwfn, p_ptt);
2466
2467 return 0;
2468}
2469
2470int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2471{
2472 int rc;
2473 u32 cid;
2474
2475 /* Free Connection CXT */
2476 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2477 qed_cxt_get_proto_cid_start(p_hwfn,
2478 proto),
2479 qed_cxt_get_proto_cid_count(p_hwfn,
2480 proto, &cid));
2481
2482 if (rc)
2483 return rc;
2484
2485 /* Free Task CXT */
2486 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2487 qed_cxt_get_proto_tid_count(p_hwfn, proto));
2488 if (rc)
2489 return rc;
2490
2491 /* Free TSDM CXT */
2492 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2493 qed_cxt_get_srq_count(p_hwfn));
2494
2495 return rc;
2496}
2497
2498int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2499 u32 tid, u8 ctx_type, void **pp_task_ctx)
2500{
2501 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2502 struct qed_ilt_client_cfg *p_cli;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002503 struct qed_tid_seg *p_seg_info;
Arun Easi1e128c82017-02-15 06:28:22 -08002504 struct qed_ilt_cli_blk *p_seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002505 u32 num_tids_per_block;
Arun Easi1e128c82017-02-15 06:28:22 -08002506 u32 tid_size, ilt_idx;
2507 u32 total_lines;
2508 u32 proto, seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002509
2510 /* Verify the personality */
2511 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002512 case QED_PCI_FCOE:
2513 proto = PROTOCOLID_FCOE;
2514 seg = QED_CXT_FCOE_TID_SEG;
2515 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002516 case QED_PCI_ISCSI:
2517 proto = PROTOCOLID_ISCSI;
2518 seg = QED_CXT_ISCSI_TID_SEG;
2519 break;
2520 default:
2521 return -EINVAL;
2522 }
2523
2524 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2525 if (!p_cli->active)
2526 return -EINVAL;
2527
2528 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2529
2530 if (ctx_type == QED_CTX_WORKING_MEM) {
2531 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2532 } else if (ctx_type == QED_CTX_FL_MEM) {
2533 if (!p_seg_info->has_fl_mem)
2534 return -EINVAL;
2535 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2536 } else {
2537 return -EINVAL;
2538 }
2539 total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2540 tid_size = p_mngr->task_type_size[p_seg_info->type];
2541 num_tids_per_block = p_seg->real_size_in_page / tid_size;
2542
2543 if (total_lines < tid / num_tids_per_block)
2544 return -EINVAL;
2545
2546 ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2547 p_mngr->pf_start_line;
2548 *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2549 (tid % num_tids_per_block) * tid_size;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002550
2551 return 0;
2552}
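/* Index arithmetic example with hypothetical sizes: a 64KB page whose
 * real_size_in_page equals the full page and tid_size == 128 give
 * num_tids_per_block = 512, so tid 1000 lands one line into the
 * segment (1000 / 512 = 1) at byte offset (1000 % 512) * 128 = 62464.
 */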