/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS	256

/* Timers constants */
#define TM_SHIFT	7
#define TM_ALIGN	BIT(TM_SHIFT)
#define TM_ELEM_SIZE	4

#define ILT_DEFAULT_HW_P_SIZE	4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4

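/* Illustrative sketch (not from the original source): an ILT entry is a
 * 64-bit value spread over ILT_ENTRY_IN_REGS 32-bit registers; bits [51:0]
 * hold the 4K-granular physical address and bit 52 the valid flag, e.g.:
 *
 *	u64 ilt_entry = 0;
 *
 *	SET_FIELD(ilt_entry, ILT_ENTRY_PHY_ADDR, (u64)phys_addr >> 12);
 *	SET_FIELD(ilt_entry, ILT_ENTRY_VALID, 1ULL);
 */
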
/* connection context union */
union conn_context {
	struct core_conn_context core_ctx;
	struct eth_conn_context eth_ctx;
	struct iscsi_conn_context iscsi_ctx;
	struct fcoe_conn_context fcoe_ctx;
	struct roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
	struct iscsi_task_context iscsi_ctx;
	struct fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
	struct rdma_task_context roce_ctx;
};

struct src_ent {
	u8 opaque[56];
	u64 next;
};

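/* Each T2 searcher entry is 64 bytes; 'next' chains entries into a
 * singly linked list by physical address, stored big-endian (see
 * qed_cxt_src_t2_alloc() below).
 */
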
#define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

/* PF per protocol configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct qed_tid_seg {
	u32 count;
	u8 type;
	bool has_fl_mem;
};

struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cid_start;
	u32 cids_per_vf;
	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)

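/* Block 0 of a client holds its connection (CDUC) or SRQ (TSDM) memory;
 * for CDUT each task segment then gets a 'working' block (CDUT_SEG_BLK)
 * plus a forced-load block (CDUT_FL_SEG_BLK), hence the 1 + N * 2 sizing
 * of the block arrays above.
 */
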
enum ilt_clients {
	ILT_CLI_CDUC,
	ILT_CLI_CDUT,
	ILT_CLI_QM,
	ILT_CLI_TM,
	ILT_CLI_SRC,
	ILT_CLI_TSDM,
	ILT_CLI_MAX
};

struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
	u32 dynamic_line_cnt;
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};

/* Per Path -
 *  ILT shadow table
 *  Protocol acquired CID lists
 *  PF start line in ILT
 */
struct qed_dma_mem {
	dma_addr_t p_phys;
	void *p_virt;
	size_t size;
};

struct qed_cid_acquired_map {
	u32 start_cid;
	u32 max_count;
	unsigned long *cid_map;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg clients[ILT_CLI_MAX];

	/* Task type sizes */
	u32 task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;

	/* total number of SRQ's for this hwfn */
	u32 srq_count;

	/* Acquired CIDs */
	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];

	/* ILT shadow table */
	struct qed_dma_mem *ilt_shadow;
	u32 pf_start_line;

	/* Mutex for a dynamic ILT allocation */
	struct mutex mutex;

	/* SRC T2 */
	struct qed_dma_mem *t2;
	u32 t2_num_pages;
	u64 first_free;
	u64 last_free;
};

static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
	u32 pf_cids;
	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
	u32 pf_tids_total;
	u32 per_vf_cids;
	u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_cxt_mngr *p_mngr,
			    struct qed_tm_iids *iids)
{
	bool tm_vf_required = false;
	bool tm_required = false;
	int i, j;

	/* Timers is a special case -> we don't count how many cids require
	 * timers but what's the max cid that will be used by the timer block.
	 * Therefore we traverse in reverse order, and once we hit a protocol
	 * that requires the timers memory, we'll sum all the protocols up
	 * to that one.
	 */
	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

		if (tm_cid_proto(i) || tm_required) {
			if (p_cfg->cid_count)
				tm_required = true;

			iids->pf_cids += p_cfg->cid_count;
		}

		if (tm_cid_proto(i) || tm_vf_required) {
			if (p_cfg->cids_per_vf)
				tm_vf_required = true;

			iids->per_vf_cids += p_cfg->cids_per_vf;
		}

		if (tm_tid_proto(i)) {
			struct qed_tid_seg *segs = p_cfg->tid_seg;

			/* for each segment there is at most one
			 * protocol for which count is not 0.
			 */
			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
				iids->pf_tids[j] += segs[j].count;

			/* The last array element is for the VFs. As for PF
			 * segments there can be only one protocol for
			 * which this value is not 0.
			 */
			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
		}
	}

	iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
	iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
	iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
		iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
		iids->pf_tids_total += iids->pf_tids[j];
	}
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As for PF
		 * segments there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids += vf_cids * p_mngr->vf_count;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
						u32 seg)
{
	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++)
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];
	return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
}

static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	return p_mgr->srq_count;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

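		/* Keep the RoCE CID range a whole number of ILT pages; the
		 * page-granular skip in qed_ilt_get_dynamic_line_cnt()
		 * assumes cid_count is a multiple of elems_per_page.
		 */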
		p_conn->cid_count = roundup(p_conn->cid_count, align);
	}
}

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type proto,
					u8 seg,
					u8 seg_type, u32 count, bool has_fl)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size, u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}

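/* The two helpers above are used as a pair: _blk_fill() describes a block
 * once, _adv_line() accounts for its ILT lines. Calling _adv_line() again
 * with the same block (as done per-VF below) reserves another identical
 * copy, since all VFs are symmetric.
 */
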
static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
					enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct qed_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
		    (u32) CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}

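/* The CDUC lines counted above are not backed with memory up front;
 * qed_ilt_blk_alloc() skips 'dynamic_line_cnt' lines so RoCE contexts
 * can be mapped on demand instead (hence the cxt-manager mutex).
 */
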
static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
						  *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_src_iids src_iids;
	struct qed_qm_iids qm_iids;
	struct qed_tm_iids tm_iids;
	struct qed_tid_seg *p_seg;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));
	memset(&src_iids, 0, sizeof(src_iids));
	memset(&tm_iids, 0, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
							       ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk =
		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1. The total-size in the corresponding FL block of
			 *    the ILT client is set to 0 - no ILT lines are
			 *    provisioned and no ILT memory allocated.
			 *
			 * 2. The start-line of said block is set to the
			 *    start line of the matching working memory
			 *    block in the ILT client. This is later used to
			 *    configure the CDU segment offset registers and
			 *    results in an FL command for TIDs of this
			 *    segment behaving as a regular load command
			 *    (loading TIDs from the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk =
		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			qed_ilt_cli_blk_fill(p_cli, p_blk,
					     curr_line, total, task_size);
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
		p_cli->vf_total_lines = curr_line -
		    p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);

			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids,
		   qm_iids.vf_cids,
		   qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	qed_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = max_t(u32, total,
				      SRC_MIN_NUM_ELEMS);

		total = roundup_pow_of_two(local_max);

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * sizeof(struct src_ent),
				     sizeof(struct src_ent));

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++)
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_TM);
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TSDM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT))
		return -EINVAL;

	return 0;
}

u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct qed_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct qed_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, QED_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}

static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 i;

	if (!p_mngr->t2)
		return;

	for (i = 0; i < p_mngr->t2_num_pages; i++)
		if (p_mngr->t2[i].p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_mngr->t2[i].size,
					  p_mngr->t2[i].p_virt,
					  p_mngr->t2[i].p_phys);

	kfree(p_mngr->t2);
	p_mngr->t2 = NULL;
}

static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct qed_ilt_client_cfg *p_src;
	struct qed_src_iids src_iids;
	struct qed_dma_mem *p_t2;
	int rc;

	memset(&src_iids, 0, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher, leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return 0;

	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
			     GFP_KERNEL);
	if (!p_mngr->t2) {
		rc = -ENOMEM;
		goto t2_fail;
	}

	/* allocate t2 pages */
	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 size = min_t(u32, total_size, psz);
		void **p_virt = &p_mngr->t2[i].p_virt;

		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					     size,
					     &p_mngr->t2[i].p_phys, GFP_KERNEL);
		if (!p_mngr->t2[i].p_virt) {
			rc = -ENOMEM;
			goto t2_fail;
		}
		memset(*p_virt, 0, size);
		p_mngr->t2[i].size = size;
		total_size -= size;
	}

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);
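	/* e.g. with the default 64K ILT page: 65536 / 64 = 1024 entries */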

	p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;

	p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
	p_mngr->last_free = (u64) p_t2->p_phys +
	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 ent_num = min_t(u32,
				    ent_per_page,
				    conn_num);
		struct src_ent *entries = p_mngr->t2[i].p_virt;
		u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
			entries[j].next = cpu_to_be64(val);
		}

		if (i < p_mngr->t2_num_pages - 1)
			val = (u64) p_mngr->t2[i + 1].p_phys;
		else
			val = 0;
		entries[j].next = cpu_to_be64(val);

		conn_num -= ent_num;
	}

	return 0;

t2_fail:
	qed_cxt_src_t2_free(p_hwfn);
	return rc;
}

#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < ILT_CLI_MAX; pos++)		\
		if (!clients[pos].active) {		\
			continue;			\
		} else					\

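/* The dangling 'else' above binds the statement following each macro
 * invocation, so inactive clients are skipped while the caller's loop
 * body still reads like a normal for-loop body.
 */
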
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
		size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

	return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->p_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->p_virt,
					  p_dma->p_phys);
		p_dma->p_virt = NULL;
	}
	kfree(p_mngr->ilt_shadow);
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return 0;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left, p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    size, &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;
		memset(p_virt, 0, size);

		ilt_shadow[line].p_phys = p_phys;
		ilt_shadow[line].p_virt = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;
	}
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0;
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
		u32 size;

		if (cid_cnt == 0)
			continue;

		size = DIV_ROUND_UP(cid_cnt,
				    sizeof(unsigned long) * BITS_PER_BYTE) *
		       sizeof(unsigned long);
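		/* e.g. on 64-bit: cid_cnt = 100 -> DIV_ROUND_UP(100, 64) = 2
		 * longs, a 16-byte bitmap covering 128 bits.
		 */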
		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
		if (!p_mngr->acquired[type].cid_map)
			goto cid_map_fail;

		p_mngr->acquired[type].max_count = cid_cnt;
		p_mngr->acquired[type].start_cid = start_cid;

		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;

		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
			   "Type %08x start: %08x count %08x\n",
			   type, p_mngr->acquired[type].start_cid,
			   p_mngr->acquired[type].max_count);
		start_cid += cid_cnt;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr)
		return -ENOMEM;

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

	/* default ILT page size for all clients is 64K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->cdev->p_iov_info)
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;

	/* Initialize the dynamic ILT allocation mutex */
	mutex_init(&p_mngr->mutex);

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate the T2 table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_cxt_src_t2_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	int type;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;

		if (cid_cnt == 0)
			continue;

		memset(p_mngr->acquired[type].cid_map, 0,
		       DIV_ROUND_UP(cid_cnt,
				    sizeof(unsigned long) * BITS_PER_BYTE) *
		       sizeof(unsigned long));
	}
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
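	/* e.g. with a hypothetical 320B context and the default 64K page:
	 * elems_per_page = 204, block_waste = 65536 - 204 * 320 = 256.
	 */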

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-0 tasks configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-1 tasks configuration */
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}

/* CDU PF */
#define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
#define CDU_SEG_REG_TYPE_MASK		0x1
#define CDU_SEG_REG_OFFSET_SHIFT	0
#define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK

static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg;
	u32 cdu_seg_params, offset;
	int i;

	static const u32 rt_type_offset_arr[] = {
		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	static const u32 rt_type_offset_fl_arr[] = {
		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];

	/* There are initializations only for CDUT during pf Phase */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		/* Segment 0 */
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg)
			continue;

		/* Note: start_line is already adjusted for the CDU
		 * segment register granularity, so we just need to
		 * divide. Adjustment is implicit as we assume ILT
		 * Page size is larger than 32K!
		 */
		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
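		/* i.e. the segment's byte offset from the client base,
		 * expressed in CDUT_SEG_ALIGNMET_IN_BYTES (32K) units.
		 */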

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);

		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
	}
}

void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_first_pf = p_hwfn->first_on_engine;
	params.num_pf_cids = iids.cids;
	params.num_vf_cids = iids.vf_cids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
	params.num_vf_pqs = qm_info->num_vf_pqs;
	params.start_vport = qm_info->start_vport;
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;

	qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}

/* CM PF */
void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
	/* XCM pure-LB queue */
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
		     qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}

/* DQ PF */
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

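	/* DORQ max-ICID registers are programmed in DQ_RANGE_ALIGN (16 CID)
	 * units, hence the >> DQ_RANGE_SHIFT below; cid counts were already
	 * rounded to this granularity in qed_cxt_set_proto_cid_count().
	 */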
1484 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1485 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1486
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001487 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1488 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1489
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001490 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1491 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1492
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001493 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1494 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1495
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001496 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1497 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1498
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001499 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1500 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1501
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001502 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1503 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1504
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001505 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1506 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1507
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001508 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1509 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1510
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001511 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1512 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1513
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001514 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1515 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001516
1517 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1518 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1519
1520 /* Connection types 6 & 7 are not in use, yet they must be configured
1521 * as the highest possible connection. Not configuring them means the
1522 * defaults will be used, and with a large number of cids a bug may
1523 * occur, if the defaults will be smaller than dq_pf_max_cid /
1524 * dq_vf_max_cid.
1525 */
1526 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1527 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1528
1529 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1530 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001531}
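
/* Worked example of the cumulative limits above (illustrative only;
 * assumes hypothetical counts of 4096 cids for type 0 and 1024 for
 * type 1):
 *
 *	type 0: 4096 >> DQ_RANGE_SHIFT(4) = 256 -> PF_MAX_ICID_0 = 256
 *	type 1: 1024 >> 4 = 64              -> PF_MAX_ICID_1 = 320
 *
 * Each register holds a running total, so the DORQ learns the upper
 * bound of every type's doorbell range in DQ_RANGE_ALIGN units.
 */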

static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *ilt_clients;
	int i;

	ilt_clients = p_hwfn->p_cxt_mngr->clients;
	for_each_ilt_valid_client(i, ilt_clients) {
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].first.reg,
			     ilt_clients[i].first.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].last.reg, ilt_clients[i].last.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].p_size.reg,
			     ilt_clients[i].p_size.val);
	}
}

static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 blk_factor;

	/* For simplicity we set the 'block' to be an ILT page */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
			     p_iov->first_vf_in_pf);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
			     p_iov->first_vf_in_pf + p_iov->total_vfs);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}
}
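
/* Worked example of blk_factor (illustrative only; assumes the
 * default 64KB ILT page, p_size.val == 4):
 *
 *	ILT_PAGE_IN_BYTES(4) >> 10 = 64KB / 1KB = 64
 *	blk_factor = ilog2(64) = 6
 *
 * i.e. the block size is reported to the hardware as a power-of-two
 * multiple of 1KB, the 'block' being a single ILT page here.
 */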

/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	struct qed_dma_mem *p_shdw;
	u32 line, rt_offst, i;

	qed_ilt_bounds_init(p_hwfn);
	qed_ilt_vf_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		/* Client's first val and RT array are absolute; ILT shadow
		 * lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be NULL in case of dynamic
			 * allocation.
			 */
			if (p_shdw[line].p_virt) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (p_shdw[line].p_phys >> 12));

				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					   rt_offst, line, i,
					   (u64)(p_shdw[line].p_phys >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
		}
	}
}
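
/* Worked example of the ILT entry encoding used above (illustrative
 * only; layout per the ILT_ENTRY_* defines at the top of this file,
 * with a hypothetical shadow page at physical address 0x123456000):
 *
 *	ILT_ENTRY_PHY_ADDR = 0x123456000 >> 12 = 0x123456
 *	ilt_hw_entry = (1ULL << ILT_ENTRY_VALID_SHIFT) | 0x123456
 *		     = 0x0010000000123456
 *
 * The 64-bit entry then occupies two consecutive 32-bit RT registers
 * (ILT_ENTRY_IN_REGS).
 */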

/* SRC (Searcher) PF */
static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rounded_conn_num, conn_num, conn_max;
	struct qed_src_iids src_iids;

	memset(&src_iids, 0, sizeof(src_iids));
	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (!conn_num)
		return;

	conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
	rounded_conn_num = roundup_pow_of_two(conn_max);

	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
		     ilog2(rounded_conn_num));

	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->first_free);
	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->last_free);
}
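
/* Worked example of the searcher hash sizing above (illustrative
 * only):
 *
 *	conn_num = 100  -> conn_max = max(100, SRC_MIN_NUM_ELEMS) = 256
 *			   rounded_conn_num = 256, hash bits = 8
 *	conn_num = 5000 -> conn_max = 5000
 *			   rounded_conn_num = 8192, hash bits = 13
 *
 * The hash table is never sized below SRC_MIN_NUM_ELEMS and is always
 * a power of two, since the hardware addresses it by hash bits.
 */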

/* Timers PF */
#define TM_CFG_NUM_IDS_SHIFT		0
#define TM_CFG_NUM_IDS_MASK		0xFFFFULL
#define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
#define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
#define TM_CFG_PARENT_PF_SHIFT		25
#define TM_CFG_PARENT_PF_MASK		0x7ULL

#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
#define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL

#define TM_CFG_TID_OFFSET_SHIFT		30
#define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
#define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL
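
/* Resulting layout of the 64-bit cfg_word built from these fields
 * (derived from the shifts and masks above; connection and task
 * configurations share bits 0..27 and differ above that):
 *
 *	conn: [15:0] NUM_IDS  [24:16] PRE_SCAN_OFFSET
 *	      [27:25] PARENT_PF  [38:30] CID_PRE_SCAN_ROWS
 *	task: [15:0] NUM_IDS  [24:16] PRE_SCAN_OFFSET
 *	      [27:25] PARENT_PF  [48:30] TID_OFFSET
 *	      [57:49] TID_PRE_SCAN_ROWS
 */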

static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 active_seg_mask = 0, tm_offset, rt_reg;
	struct qed_tm_iids tm_iids;
	u64 cfg_word;
	u8 i;

	memset(&tm_iids, 0, sizeof(tm_iids));
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);

	/* @@@TBD No pre-scan for now */

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	cfg_word = 0;
	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);	/* scan all */

	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
		 (sizeof(cfg_word) / sizeof(u32)) *
		 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);

	/* Enable scan */
	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
		     tm_iids.pf_cids ? 0x1 : 0x0);

	/* @@@TBD how to enable the scan for the VFs */

	tm_offset = tm_iids.per_vf_cids;

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	tm_offset = tm_iids.pf_cids;
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (NUM_OF_VFS(p_hwfn->cdev) +
			  p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
		active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);

		tm_offset += tm_iids.pf_tids[i];
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
		active_seg_mask = 0;

	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);

	/* @@@TBD how to enable the scan for the VFs */
}
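
/* Indexing of the TM config memories programmed above (derived from
 * the rt_reg arithmetic; each entry is one 64-bit cfg_word, i.e. two
 * 32-bit RT registers):
 *
 *	CONN, VF i:	entry first_vf_in_pf + i
 *	CONN, PF:	entry NUM_OF_VFS + rel_pf_id
 *	TASK, VF i:	entry first_vf_in_pf + i
 *	TASK, PF seg s:	entry NUM_OF_VFS +
 *			rel_pf_id * NUM_TASK_PF_SEGMENTS + s
 *
 * i.e. all VFs on the engine come first, followed by the per-PF
 * entries.
 */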

static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
{
	if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
	    p_hwfn->pf_params.fcoe_pf_params.is_target)
		STORE_RT_REG(p_hwfn,
			     PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
}

static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_fcoe;
	struct qed_tid_seg *p_tid;

	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];

	/* If FCoE is active, set the MAX OX_ID (tid) in the Parser */
	if (!p_fcoe->cid_count)
		return;

	p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
	if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
				 p_tid->count);
	} else {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
				 p_tid->count);
	}
}

void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
	qed_cdu_init_common(p_hwfn);
	qed_prs_init_common(p_hwfn);
}

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_qm_init_pf(p_hwfn, p_ptt);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_cdu_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
	qed_src_init_pf(p_hwfn);
	qed_tm_init_pf(p_hwfn);
	qed_prs_init_pf(p_hwfn);
}

int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type, u32 *p_cid)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rel_cid;

	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
		return -EINVAL;
	}

	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
				      p_mngr->acquired[type].max_count);

	if (rel_cid >= p_mngr->acquired[type].max_count) {
		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
		return -EINVAL;
	}

	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);

	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;

	return 0;
}
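
/* Typical caller pattern (illustrative sketch only; error handling
 * trimmed). A protocol acquires a cid before establishing a
 * connection and releases it on teardown:
 *
 *	u32 cid;
 *
 *	if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid))
 *		return -EINVAL;
 *	...use cid in the connection-establishment flow...
 *	qed_cxt_release_cid(p_hwfn, cid);
 *
 * The returned cid is absolute: the protocol range's start_cid plus
 * the first free bit in that protocol's map.
 */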

static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
				      u32 cid, enum protocol_type *p_type)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	enum protocol_type p;
	u32 rel_cid;

	/* Iterate over protocols and find matching cid range */
	for (p = 0; p < MAX_CONN_TYPES; p++) {
		p_map = &p_mngr->acquired[p];

		if (!p_map->cid_map)
			continue;
		if (cid >= p_map->start_cid &&
		    cid < p_map->start_cid + p_map->max_count)
			break;
	}
	*p_type = p;

	if (p == MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
		return false;
	}

	rel_cid = cid - p_map->start_cid;
	if (!test_bit(rel_cid, p_map->cid_map)) {
		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
		return false;
	}
	return true;
}

void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	enum protocol_type type;
	bool b_acquired;
	u32 rel_cid;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);

	if (!b_acquired)
		return;

	rel_cid = cid - p_mngr->acquired[type].start_cid;
	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}

int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;
	bool b_acquired;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);

	if (!b_acquired)
		return -EINVAL;

	/* Set the protocol type */
	p_info->type = type;

	/* Compute the context virtual pointer */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;

	/* Make sure context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].p_virt)
		return -EINVAL;

	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
			p_info->iid % cxts_per_p * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);

	return 0;
}
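
/* Worked example of the pointer math above (illustrative only;
 * assumes a 64KB CDUC ILT page and a hypothetical 512-byte connection
 * context):
 *
 *	cxts_per_p = 64KB / 512 = 128
 *	iid = 300 -> line = 300 / 128 = 2
 *		     offset = (300 % 128) * 512 = 44 * 512 bytes
 *
 * so the context for iid 300 lives 44 contexts into the third shadow
 * page.
 */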

static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
				   struct qed_rdma_pf_params *p_params,
				   u32 num_tasks)
{
	u32 num_cons, num_qps, num_srqs;
	enum protocol_type proto;

	num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH_ROCE:
		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
		num_cons = num_qps * 2;	/* each QP requires two connections */
		proto = PROTOCOLID_ROCE;
		break;
	default:
		return;
	}

	if (num_cons && num_tasks) {
		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);

		/* Deliberately passing ROCE for the task id, since
		 * iWARP / RoCE share the task id.
		 */
		qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
					    QED_CXT_ROCE_TID_SEG, 1,
					    num_tasks, false);
		qed_cxt_set_srq_count(p_hwfn, num_srqs);
	} else {
		DP_INFO(p_hwfn->cdev,
			"RDMA personality used without setting params!\n");
	}
}

int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
{
	/* Set the number of required CORE connections */
	u32 core_cids = 1;	/* SPQ */

	if (p_hwfn->using_ll2)
		core_cids += 4;
	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH_ROCE:
	{
		qed_rdma_set_pf_params(p_hwfn,
				       &p_hwfn->pf_params.rdma_pf_params,
				       rdma_tasks);
		/* No need for break since RoCE coexists with Ethernet */
	}
	case QED_PCI_ETH:
	{
		struct qed_eth_pf_params *p_params =
		    &p_hwfn->pf_params.eth_pf_params;

		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
					    p_params->num_cons, 1);
		break;
	}
	case QED_PCI_FCOE:
	{
		struct qed_fcoe_pf_params *p_params;

		p_params = &p_hwfn->pf_params.fcoe_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_FCOE,
						    p_params->num_cons,
						    0);

			qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
						    QED_CXT_FCOE_TID_SEG, 0,
						    p_params->num_tasks, true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"FCoE personality used without setting params!\n");
		}
		break;
	}
	case QED_PCI_ISCSI:
	{
		struct qed_iscsi_pf_params *p_params;

		p_params = &p_hwfn->pf_params.iscsi_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_ISCSI,
						    p_params->num_cons,
						    0);

			qed_cxt_set_proto_tid_count(p_hwfn,
						    PROTOCOLID_ISCSI,
						    QED_CXT_ISCSI_TID_SEG,
						    0,
						    p_params->num_tasks,
						    true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"iSCSI personality used without setting params!\n");
		}
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
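
/* Typical usage (illustrative sketch only): the upper layer fills
 * p_hwfn->pf_params for its personality before resource allocation
 * and then calls this function, e.g. for an Ethernet PF with a
 * hypothetical 64 connections and no RDMA tasks:
 *
 *	p_hwfn->pf_params.eth_pf_params.num_cons = 64;
 *	rc = qed_cxt_set_pf_params(p_hwfn, 0);
 *
 * The CORE (SPQ/LL2) cids are always added on top of whatever the
 * personality requests.
 */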

int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
			     struct qed_tid_mem *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 proto, seg, total_lines, i, shadow_line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_fl_seg;
	struct qed_tid_seg *p_seg_info;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = QED_CXT_FCOE_TID_SEG;
		break;
	case QED_PCI_ISCSI:
		proto = PROTOCOLID_ISCSI;
		seg = QED_CXT_ISCSI_TID_SEG;
		break;
	default:
		return -EINVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return -EINVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
	if (!p_seg_info->has_fl_mem)
		return -EINVAL;

	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
				   p_fl_seg->real_size_in_page);

	for (i = 0; i < total_lines; i++) {
		shadow_line = i + p_fl_seg->start_line -
			      p_hwfn->p_cxt_mngr->pf_start_line;
		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
	}
	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
			p_fl_seg->real_size_in_page;
	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
				     p_info->tid_size;

	return 0;
}

/* This function is very RoCE-oriented; if another protocol wants this
 * feature in the future, it will need to be made more generic.
 */
int
qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			  enum qed_cxt_elem_type elem_type, u32 iid)
{
	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_ptt *p_ptt;
	dma_addr_t p_phys;
	u64 ilt_hw_entry;
	void *p_virt;
	int rc = 0;

	switch (elem_type) {
	case QED_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
		return -EINVAL;
	}

	/* Calculate line in ilt */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	line = p_blk->start_line + (iid / elems_per_p);
	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;

	/* If line is already allocated, do nothing, otherwise allocate it and
	 * write it to the PSWRQ2 registers.
	 * This section can be run in parallel from different contexts and thus
	 * a mutex protection is needed.
	 */

	mutex_lock(&p_hwfn->p_cxt_mngr->mutex);

	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
		goto out0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn,
			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
		rc = -EBUSY;
		goto out0;
	}

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_blk->real_size_in_page,
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		rc = -ENOMEM;
		goto out1;
	}
	memset(p_virt, 0, p_blk->real_size_in_page);

	/* Configuration of refTagMask to 0xF is required for RoCE DIF MR only,
	 * to compensate for a HW bug, but it is configured even if DIF is not
	 * enabled. This is harmless and allows us to avoid a dedicated API. We
	 * configure the field for all of the contexts on the newly allocated
	 * page.
	 */
	if (elem_type == QED_ELEM_TASK) {
		u32 elem_i;
		u8 *elem_start = (u8 *)p_virt;
		union type1_task_context *elem;

		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
			elem = (union type1_task_context *)elem_start;
			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
				  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}

	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
	    p_blk->real_size_in_page;

	/* Compute absolute offset */
	reg_offset = PSWRQ2_REG_ILT_MEMORY +
		     (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);

	ilt_hw_entry = 0;
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry,
		  ILT_ENTRY_PHY_ADDR,
		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));

	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
	qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
			  reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);

	if (elem_type == QED_ELEM_CXT) {
		u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
					 elems_per_p;

		/* Update the relevant register in the parser */
		qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
		       last_cid_allocated - 1);

		if (!p_hwfn->b_rdma_enabled_in_prs) {
			/* Enable RoCE search */
			qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
			p_hwfn->b_rdma_enabled_in_prs = true;
		}
	}

out1:
	qed_ptt_release(p_hwfn, p_ptt);
out0:
	mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);

	return rc;
}

/* This function is very RoCE-oriented; if another protocol wants this
 * feature in the future, it will need to be made more generic.
 */
static int
qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
		       enum qed_cxt_elem_type elem_type,
		       u32 start_iid, u32 count)
{
	u32 start_line, end_line, shadow_start_line, shadow_end_line;
	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u32 end_iid = start_iid + count;
	struct qed_ptt *p_ptt;
	u64 ilt_hw_entry = 0;
	u32 i;

	switch (elem_type) {
	case QED_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
		return -EINVAL;
	}

	/* Calculate line in ilt */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	start_line = p_blk->start_line + (start_iid / elems_per_p);
	end_line = p_blk->start_line + (end_iid / elems_per_p);
	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
		end_line--;

	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn,
			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
		return -EBUSY;
	}

	for (i = shadow_start_line; i < shadow_end_line; i++) {
		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
			continue;

		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);

		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;

		/* Compute absolute offset */
		reg_offset = PSWRQ2_REG_ILT_MEMORY +
			     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
			      ILT_ENTRY_IN_REGS);

		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
		 * wide-bus.
		 */
		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64) (uintptr_t) &ilt_hw_entry,
				  reg_offset,
				  sizeof(ilt_hw_entry) / sizeof(u32),
				  0);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
{
	int rc;
	u32 cid;

	/* Free Connection CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
				    qed_cxt_get_proto_cid_start(p_hwfn,
								proto),
				    qed_cxt_get_proto_cid_count(p_hwfn,
								proto, &cid));

	if (rc)
		return rc;

	/* Free Task CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
				    qed_cxt_get_proto_tid_count(p_hwfn, proto));
	if (rc)
		return rc;

	/* Free TSDM CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
				    qed_cxt_get_srq_count(p_hwfn));

	return rc;
}

int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
			 u32 tid, u8 ctx_type, void **pp_task_ctx)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg_info;
	struct qed_ilt_cli_blk *p_seg;
	u32 num_tids_per_block;
	u32 tid_size, ilt_idx;
	u32 total_lines;
	u32 proto, seg;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = QED_CXT_FCOE_TID_SEG;
		break;
	case QED_PCI_ISCSI:
		proto = PROTOCOLID_ISCSI;
		seg = QED_CXT_ISCSI_TID_SEG;
		break;
	default:
		return -EINVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return -EINVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];

	if (ctx_type == QED_CTX_WORKING_MEM) {
		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
	} else if (ctx_type == QED_CTX_FL_MEM) {
		if (!p_seg_info->has_fl_mem)
			return -EINVAL;
		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	} else {
		return -EINVAL;
	}
	total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
	tid_size = p_mngr->task_type_size[p_seg_info->type];
	num_tids_per_block = p_seg->real_size_in_page / tid_size;

	if (total_lines < tid / num_tids_per_block)
		return -EINVAL;

	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
		  p_mngr->pf_start_line;
	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
		       (tid % num_tids_per_block) * tid_size;

	return 0;
}
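
/* Typical usage (illustrative sketch only): a storage protocol looks
 * up the working-memory task context of a tid it owns:
 *
 *	void *p_ctx;
 *
 *	if (qed_cxt_get_task_ctx(p_hwfn, tid, QED_CTX_WORKING_MEM,
 *				 &p_ctx))
 *		return -EINVAL;
 *	...program the task context at p_ctx...
 *
 * QED_CTX_FL_MEM selects the FL segment instead and is only valid
 * when the segment was configured with has_fl_mem.
 */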