/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS	256

/* Timers constants */
#define TM_SHIFT		7
#define TM_ALIGN		BIT(TM_SHIFT)
#define TM_ELEM_SIZE		4

#define ILT_DEFAULT_HW_P_SIZE	4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
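/* An ILT page is 4K shifted left by the client's p_size value; with the
 * default p_size of ILT_DEFAULT_HW_P_SIZE (4) every ILT page is therefore
 * 64K (see qed_cxt_mngr_alloc()).
 */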

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4
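/* Each ILT entry spans two 32-bit runtime registers: bits 0-51 hold the
 * physical page address shifted right by 12 bits and bit 52 is the valid
 * bit. The entry is composed with SET_FIELD() in qed_ilt_init_pf().
 */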
86
87/* connection context union */
88union conn_context {
89 struct core_conn_context core_ctx;
90 struct eth_conn_context eth_ctx;
Yuval Mintzdbb799c2016-06-03 14:35:35 +030091 struct iscsi_conn_context iscsi_ctx;
Arun Easi1e128c82017-02-15 06:28:22 -080092 struct fcoe_conn_context fcoe_ctx;
Yuval Mintzdbb799c2016-06-03 14:35:35 +030093 struct roce_conn_context roce_ctx;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020094};
95
Arun Easi1e128c82017-02-15 06:28:22 -080096/* TYPE-0 task context - iSCSI, FCOE */
Yuval Mintzdbb799c2016-06-03 14:35:35 +030097union type0_task_context {
98 struct iscsi_task_context iscsi_ctx;
Arun Easi1e128c82017-02-15 06:28:22 -080099 struct fcoe_task_context fcoe_ctx;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300100};
101
102/* TYPE-1 task context - ROCE */
103union type1_task_context {
104 struct rdma_task_context roce_ctx;
105};
106
107struct src_ent {
108 u8 opaque[56];
109 u64 next;
110};
111
#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES	(1 << (CDUT_SEG_ALIGNMET + 12))
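/* i.e. every CDU task segment must start on a 32K (8 * 4K) boundary */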
114
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200115#define CONN_CXT_SIZE(p_hwfn) \
116 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
117
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300118#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
119
120#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
121 ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
122
123/* Alignment is inherent to the type1_task_context structure */
124#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
125
/* PF per-protocol configuration object */
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300127#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
128#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
129
130struct qed_tid_seg {
131 u32 count;
132 u8 type;
133 bool has_fl_mem;
134};
135
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200136struct qed_conn_type_cfg {
137 u32 cid_count;
138 u32 cid_start;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300139 u32 cids_per_vf;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300140 struct qed_tid_seg tid_seg[TASK_SEGMENTS];
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200141};
142
/* ILT Client configuration, per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
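/* Block 0 of each client is the generic/connection block (CDUC context,
 * TSDM SRQ context, or the single block used by QM/TM/SRC); for CDUT,
 * blocks 1..NUM_TASK_x_SEGMENTS hold the 'working' task memory per segment
 * and the following NUM_TASK_x_SEGMENTS blocks hold the matching
 * forced-load (FL) memory.
 */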
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200150
151enum ilt_clients {
152 ILT_CLI_CDUC,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300153 ILT_CLI_CDUT,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200154 ILT_CLI_QM,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300155 ILT_CLI_TM,
156 ILT_CLI_SRC,
157 ILT_CLI_TSDM,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200158 ILT_CLI_MAX
159};
160
161struct ilt_cfg_pair {
162 u32 reg;
163 u32 val;
164};
165
166struct qed_ilt_cli_blk {
167 u32 total_size; /* 0 means not active */
168 u32 real_size_in_page;
169 u32 start_line;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300170 u32 dynamic_line_cnt;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200171};
172
173struct qed_ilt_client_cfg {
174 bool active;
175
176 /* ILT boundaries */
177 struct ilt_cfg_pair first;
178 struct ilt_cfg_pair last;
179 struct ilt_cfg_pair p_size;
180
181 /* ILT client blocks for PF */
182 struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
183 u32 pf_total_lines;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300184
185 /* ILT client blocks for VFs */
186 struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
187 u32 vf_total_lines;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200188};
189
190/* Per Path -
191 * ILT shadow table
192 * Protocol acquired CID lists
193 * PF start line in ILT
194 */
195struct qed_dma_mem {
196 dma_addr_t p_phys;
197 void *p_virt;
198 size_t size;
199};
200
201struct qed_cid_acquired_map {
202 u32 start_cid;
203 u32 max_count;
204 unsigned long *cid_map;
205};
206
207struct qed_cxt_mngr {
	/* Per-protocol configuration */
209 struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
210
211 /* computed ILT structure */
212 struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
213
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300214 /* Task type sizes */
215 u32 task_type_size[NUM_TASK_TYPES];
216
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300217 /* total number of VFs for this hwfn -
218 * ALL VFs are symmetric in terms of HW resources
219 */
220 u32 vf_count;
221
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300222 /* total number of SRQ's for this hwfn */
223 u32 srq_count;
224
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200225 /* Acquired CIDs */
226 struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
227
228 /* ILT shadow table */
229 struct qed_dma_mem *ilt_shadow;
230 u32 pf_start_line;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300231
232 /* Mutex for a dynamic ILT allocation */
233 struct mutex mutex;
234
235 /* SRC T2 */
236 struct qed_dma_mem *t2;
237 u32 t2_num_pages;
238 u64 first_free;
239 u64 last_free;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200240};
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300241static bool src_proto(enum protocol_type type)
242{
243 return type == PROTOCOLID_ISCSI ||
Mintz, Yuval5f8cb032017-04-03 12:21:12 +0300244 type == PROTOCOLID_FCOE;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300245}
246
247static bool tm_cid_proto(enum protocol_type type)
248{
249 return type == PROTOCOLID_ISCSI ||
Arun Easi1e128c82017-02-15 06:28:22 -0800250 type == PROTOCOLID_FCOE ||
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300251 type == PROTOCOLID_ROCE;
252}
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200253
Arun Easi1e128c82017-02-15 06:28:22 -0800254static bool tm_tid_proto(enum protocol_type type)
255{
256 return type == PROTOCOLID_FCOE;
257}
258
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300259/* counts the iids for the CDU/CDUC ILT client configuration */
260struct qed_cdu_iids {
261 u32 pf_cids;
262 u32 per_vf_cids;
263};
264
265static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
266 struct qed_cdu_iids *iids)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200267{
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300268 u32 type;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200269
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300270 for (type = 0; type < MAX_CONN_TYPES; type++) {
271 iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
272 iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
273 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200274}
275
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300276/* counts the iids for the Searcher block configuration */
277struct qed_src_iids {
278 u32 pf_cids;
279 u32 per_vf_cids;
280};
281
282static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
283 struct qed_src_iids *iids)
284{
285 u32 i;
286
287 for (i = 0; i < MAX_CONN_TYPES; i++) {
288 if (!src_proto(i))
289 continue;
290
291 iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
292 iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
293 }
294}
295
296/* counts the iids for the Timers block configuration */
297struct qed_tm_iids {
298 u32 pf_cids;
299 u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
300 u32 pf_tids_total;
301 u32 per_vf_cids;
302 u32 per_vf_tids;
303};
304
Michal Kalderon44531ba2017-04-03 12:21:10 +0300305static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
306 struct qed_cxt_mngr *p_mngr,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300307 struct qed_tm_iids *iids)
308{
Michal Kalderon44531ba2017-04-03 12:21:10 +0300309 bool tm_vf_required = false;
310 bool tm_required = false;
311 int i, j;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300312
	/* Timers is a special case - we don't count how many cids require
	 * timers but what's the max cid that will be used by the timer block.
	 * Therefore we traverse in reverse order, and once we hit a protocol
	 * that requires the timers memory, we'll sum all the protocols up
	 * to that one.
	 */
319 for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300320 struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
321
Michal Kalderon44531ba2017-04-03 12:21:10 +0300322 if (tm_cid_proto(i) || tm_required) {
323 if (p_cfg->cid_count)
324 tm_required = true;
325
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300326 iids->pf_cids += p_cfg->cid_count;
Michal Kalderon44531ba2017-04-03 12:21:10 +0300327 }
328
329 if (tm_cid_proto(i) || tm_vf_required) {
330 if (p_cfg->cids_per_vf)
331 tm_vf_required = true;
332
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300333 iids->per_vf_cids += p_cfg->cids_per_vf;
334 }
Arun Easi1e128c82017-02-15 06:28:22 -0800335
336 if (tm_tid_proto(i)) {
337 struct qed_tid_seg *segs = p_cfg->tid_seg;
338
339 /* for each segment there is at most one
340 * protocol for which count is not 0.
341 */
342 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
343 iids->pf_tids[j] += segs[j].count;
344
			/* The last array element is for the VFs. As with the
			 * PF segments, there can be only one protocol for
			 * which this value is not 0.
			 */
349 iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
350 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300351 }
352
353 iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
354 iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
355 iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
356
357 for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
358 iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
359 iids->pf_tids_total += iids->pf_tids[j];
360 }
361}
362
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200363static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
364 struct qed_qm_iids *iids)
365{
366 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300367 struct qed_tid_seg *segs;
368 u32 vf_cids = 0, type, j;
369 u32 vf_tids = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200370
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300371 for (type = 0; type < MAX_CONN_TYPES; type++) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200372 iids->cids += p_mngr->conn_cfg[type].cid_count;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300373 vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300374
375 segs = p_mngr->conn_cfg[type].tid_seg;
376 /* for each segment there is at most one
377 * protocol for which count is not 0.
378 */
379 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
380 iids->tids += segs[j].count;
381
		/* The last array element is for the VFs. As with the
		 * PF segments, there can be only one protocol for
		 * which this value is not 0.
		 */
386 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300387 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200388
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300389 iids->vf_cids += vf_cids * p_mngr->vf_count;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300390 iids->tids += vf_tids * p_mngr->vf_count;
391
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300392 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300393 "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
394 iids->cids, iids->vf_cids, iids->tids, vf_tids);
395}
396
397static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
398 u32 seg)
399{
400 struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
401 u32 i;
402
403 /* Find the protocol with tid count > 0 for this segment.
404 * Note: there can only be one and this is already validated.
405 */
406 for (i = 0; i < MAX_CONN_TYPES; i++)
407 if (p_cfg->conn_cfg[i].tid_seg[seg].count)
408 return &p_cfg->conn_cfg[i].tid_seg[seg];
409 return NULL;
410}
411
Yuval Mintz8c93bea2016-10-13 22:57:03 +0300412static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300413{
414 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
415
416 p_mgr->srq_count = num_srqs;
417}
418
Yuval Mintz8c93bea2016-10-13 22:57:03 +0300419static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300420{
421 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
422
423 return p_mgr->srq_count;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200424}
425
426/* set the iids count per protocol */
427static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
428 enum protocol_type type,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300429 u32 cid_count, u32 vf_cid_cnt)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200430{
431 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
432 struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
433
434 p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300435 p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300436
437 if (type == PROTOCOLID_ROCE) {
438 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
439 u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
440 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
Ram Amranif3e48112017-03-14 15:25:58 +0200441 u32 align = elems_per_page * DQ_RANGE_ALIGN;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300442
Ram Amranif3e48112017-03-14 15:25:58 +0200443 p_conn->cid_count = roundup(p_conn->cid_count, align);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300444 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300445}
446
Yuval Mintz1a635e42016-08-15 10:42:43 +0300447u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
448 enum protocol_type type, u32 *vf_cid)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300449{
450 if (vf_cid)
451 *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
452
453 return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200454}
455
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300456u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
457 enum protocol_type type)
458{
459 return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
460}
461
462u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
463 enum protocol_type type)
464{
465 u32 cnt = 0;
466 int i;
467
468 for (i = 0; i < TASK_SEGMENTS; i++)
469 cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
470
471 return cnt;
472}
473
Yuval Mintz1a635e42016-08-15 10:42:43 +0300474static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
475 enum protocol_type proto,
476 u8 seg,
477 u8 seg_type, u32 count, bool has_fl)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300478{
479 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
480 struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
481
482 p_seg->count = count;
483 p_seg->has_fl_mem = has_fl;
484 p_seg->type = seg_type;
485}
486
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200487static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
488 struct qed_ilt_cli_blk *p_blk,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300489 u32 start_line, u32 total_size, u32 elem_size)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200490{
491 u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
492
	/* verify that it's called only once for each block */
494 if (p_blk->total_size)
495 return;
496
497 p_blk->total_size = total_size;
498 p_blk->real_size_in_page = 0;
499 if (elem_size)
500 p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
501 p_blk->start_line = start_line;
502}
503
504static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
505 struct qed_ilt_client_cfg *p_cli,
506 struct qed_ilt_cli_blk *p_blk,
507 u32 *p_line, enum ilt_clients client_id)
508{
509 if (!p_blk->total_size)
510 return;
511
512 if (!p_cli->active)
513 p_cli->first.val = *p_line;
514
515 p_cli->active = true;
Yuval Mintz1a635e42016-08-15 10:42:43 +0300516 *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200517 p_cli->last.val = *p_line - 1;
518
519 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
520 "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
521 client_id, p_cli->first.val,
522 p_cli->last.val, p_blk->total_size,
523 p_blk->real_size_in_page, p_blk->start_line);
524}
525
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300526static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
527 enum ilt_clients ilt_client)
528{
529 u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
530 struct qed_ilt_client_cfg *p_cli;
531 u32 lines_to_skip = 0;
532 u32 cxts_per_p;
533
534 if (ilt_client == ILT_CLI_CDUC) {
535 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
536
537 cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
538 (u32) CONN_CXT_SIZE(p_hwfn);
539
540 lines_to_skip = cid_count / cxts_per_p;
541 }
542
543 return lines_to_skip;
544}
545
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200546int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
547{
548 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300549 u32 curr_line, total, i, task_size, line;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200550 struct qed_ilt_client_cfg *p_cli;
551 struct qed_ilt_cli_blk *p_blk;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300552 struct qed_cdu_iids cdu_iids;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300553 struct qed_src_iids src_iids;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200554 struct qed_qm_iids qm_iids;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300555 struct qed_tm_iids tm_iids;
556 struct qed_tid_seg *p_seg;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200557
558 memset(&qm_iids, 0, sizeof(qm_iids));
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300559 memset(&cdu_iids, 0, sizeof(cdu_iids));
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300560 memset(&src_iids, 0, sizeof(src_iids));
561 memset(&tm_iids, 0, sizeof(tm_iids));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200562
563 p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
564
565 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
566 "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
567 p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
568
569 /* CDUC */
570 p_cli = &p_mngr->clients[ILT_CLI_CDUC];
571 curr_line = p_mngr->pf_start_line;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300572
573 /* CDUC PF */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200574 p_cli->pf_total_lines = 0;
575
576 /* get the counters for the CDUC and QM clients */
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300577 qed_cxt_cdu_iids(p_mngr, &cdu_iids);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200578
579 p_blk = &p_cli->pf_blks[CDUC_BLK];
580
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300581 total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200582
583 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
584 total, CONN_CXT_SIZE(p_hwfn));
585
586 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
587 p_cli->pf_total_lines = curr_line - p_blk->start_line;
588
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300589 p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
590 ILT_CLI_CDUC);
591
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300592 /* CDUC VF */
593 p_blk = &p_cli->vf_blks[CDUC_BLK];
594 total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
595
596 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
597 total, CONN_CXT_SIZE(p_hwfn));
598
599 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
600 p_cli->vf_total_lines = curr_line - p_blk->start_line;
601
602 for (i = 1; i < p_mngr->vf_count; i++)
603 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
604 ILT_CLI_CDUC);
605
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300606 /* CDUT PF */
607 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
608 p_cli->first.val = curr_line;
609
610 /* first the 'working' task memory */
611 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
612 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
613 if (!p_seg || p_seg->count == 0)
614 continue;
615
616 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
617 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
618 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
619 p_mngr->task_type_size[p_seg->type]);
620
621 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
622 ILT_CLI_CDUT);
623 }
624
625 /* next the 'init' task memory (forced load memory) */
626 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
627 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
628 if (!p_seg || p_seg->count == 0)
629 continue;
630
631 p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
632
633 if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1. The total-size in the corresponding FL block of
			 *    the ILT client is set to 0 - no ILT lines are
			 *    provisioned and no ILT memory is allocated.
			 *
			 * 2. The start-line of said block is set to the
			 *    start line of the matching working memory
			 *    block in the ILT client. This is later used to
			 *    configure the CDU segment offset registers and
			 *    results in an FL command for TIDs of this
			 *    segment behaving as a regular load command
			 *    (loading TIDs from the working memory).
			 */
650 line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
651
652 qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
653 continue;
654 }
655 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
656
657 qed_ilt_cli_blk_fill(p_cli, p_blk,
658 curr_line, total,
659 p_mngr->task_type_size[p_seg->type]);
660
661 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
662 ILT_CLI_CDUT);
663 }
664 p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
665
666 /* CDUT VF */
667 p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
668 if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */
672
673 /* 'working' memory */
674 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
675
676 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
677 qed_ilt_cli_blk_fill(p_cli, p_blk,
678 curr_line, total,
679 p_mngr->task_type_size[p_seg->type]);
680
681 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
682 ILT_CLI_CDUT);
683
684 /* 'init' memory */
685 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
686 if (!p_seg->has_fl_mem) {
687 /* see comment above */
688 line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
689 qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
690 } else {
691 task_size = p_mngr->task_type_size[p_seg->type];
692 qed_ilt_cli_blk_fill(p_cli, p_blk,
693 curr_line, total, task_size);
694 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
695 ILT_CLI_CDUT);
696 }
697 p_cli->vf_total_lines = curr_line -
698 p_cli->vf_blks[0].start_line;
699
700 /* Now for the rest of the VFs */
701 for (i = 1; i < p_mngr->vf_count; i++) {
702 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
703 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
704 ILT_CLI_CDUT);
705
706 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
707 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
708 ILT_CLI_CDUT);
709 }
710 }
711
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200712 /* QM */
713 p_cli = &p_mngr->clients[ILT_CLI_QM];
714 p_blk = &p_cli->pf_blks[0];
715
716 qed_cxt_qm_iids(p_hwfn, &qm_iids);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300717 total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300718 qm_iids.vf_cids, qm_iids.tids,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300719 p_hwfn->qm_info.num_pqs,
720 p_hwfn->qm_info.num_vf_pqs);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200721
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300722 DP_VERBOSE(p_hwfn,
723 QED_MSG_ILT,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300724 "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300725 qm_iids.cids,
726 qm_iids.vf_cids,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300727 qm_iids.tids,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300728 p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200729
730 qed_ilt_cli_blk_fill(p_cli, p_blk,
731 curr_line, total * 0x1000,
732 QM_PQ_ELEMENT_SIZE);
733
734 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
735 p_cli->pf_total_lines = curr_line - p_blk->start_line;
736
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300737 /* SRC */
738 p_cli = &p_mngr->clients[ILT_CLI_SRC];
739 qed_cxt_src_iids(p_mngr, &src_iids);
740
741 /* Both the PF and VFs searcher connections are stored in the per PF
742 * database. Thus sum the PF searcher cids and all the VFs searcher
743 * cids.
744 */
745 total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
746 if (total) {
747 u32 local_max = max_t(u32, total,
748 SRC_MIN_NUM_ELEMS);
749
750 total = roundup_pow_of_two(local_max);
751
752 p_blk = &p_cli->pf_blks[0];
753 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
754 total * sizeof(struct src_ent),
755 sizeof(struct src_ent));
756
757 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
758 ILT_CLI_SRC);
759 p_cli->pf_total_lines = curr_line - p_blk->start_line;
760 }
761
762 /* TM PF */
763 p_cli = &p_mngr->clients[ILT_CLI_TM];
Michal Kalderon44531ba2017-04-03 12:21:10 +0300764 qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300765 total = tm_iids.pf_cids + tm_iids.pf_tids_total;
766 if (total) {
767 p_blk = &p_cli->pf_blks[0];
768 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
769 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
770
771 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
772 ILT_CLI_TM);
773 p_cli->pf_total_lines = curr_line - p_blk->start_line;
774 }
775
776 /* TM VF */
777 total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
778 if (total) {
779 p_blk = &p_cli->vf_blks[0];
780 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
781 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
782
783 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
784 ILT_CLI_TM);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300785
Mintz, Yuval70566b42017-04-03 12:21:11 +0300786 p_cli->vf_total_lines = curr_line - p_blk->start_line;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300787 for (i = 1; i < p_mngr->vf_count; i++)
788 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
789 ILT_CLI_TM);
790 }
791
792 /* TSDM (SRQ CONTEXT) */
793 total = qed_cxt_get_srq_count(p_hwfn);
794
795 if (total) {
796 p_cli = &p_mngr->clients[ILT_CLI_TSDM];
797 p_blk = &p_cli->pf_blks[SRQ_BLK];
798 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
799 total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
800
801 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
802 ILT_CLI_TSDM);
803 p_cli->pf_total_lines = curr_line - p_blk->start_line;
804 }
805
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200806 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
807 RESC_NUM(p_hwfn, QED_ILT)) {
808 DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
809 curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
810 return -EINVAL;
811 }
812
813 return 0;
814}
815
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300816static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
817{
818 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
819 u32 i;
820
821 if (!p_mngr->t2)
822 return;
823
824 for (i = 0; i < p_mngr->t2_num_pages; i++)
825 if (p_mngr->t2[i].p_virt)
826 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
827 p_mngr->t2[i].size,
828 p_mngr->t2[i].p_virt,
829 p_mngr->t2[i].p_phys);
830
831 kfree(p_mngr->t2);
832 p_mngr->t2 = NULL;
833}
834
835static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
836{
837 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
838 u32 conn_num, total_size, ent_per_page, psz, i;
839 struct qed_ilt_client_cfg *p_src;
840 struct qed_src_iids src_iids;
841 struct qed_dma_mem *p_t2;
842 int rc;
843
844 memset(&src_iids, 0, sizeof(src_iids));
845
	/* If the SRC ILT client is inactive - there are no connections
	 * requiring the searcher; leave.
	 */
849 p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
850 if (!p_src->active)
851 return 0;
852
853 qed_cxt_src_iids(p_mngr, &src_iids);
854 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
855 total_size = conn_num * sizeof(struct src_ent);
856
857 /* use the same page size as the SRC ILT client */
858 psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
859 p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
860
861 /* allocate t2 */
Joe Perches2591c282016-09-04 14:24:03 -0700862 p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300863 GFP_KERNEL);
864 if (!p_mngr->t2) {
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300865 rc = -ENOMEM;
866 goto t2_fail;
867 }
868
869 /* allocate t2 pages */
870 for (i = 0; i < p_mngr->t2_num_pages; i++) {
871 u32 size = min_t(u32, total_size, psz);
872 void **p_virt = &p_mngr->t2[i].p_virt;
873
874 *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
875 size,
876 &p_mngr->t2[i].p_phys, GFP_KERNEL);
877 if (!p_mngr->t2[i].p_virt) {
878 rc = -ENOMEM;
879 goto t2_fail;
880 }
881 memset(*p_virt, 0, size);
882 p_mngr->t2[i].size = size;
883 total_size -= size;
884 }
885
886 /* Set the t2 pointers */
887
888 /* entries per page - must be a power of two */
889 ent_per_page = psz / sizeof(struct src_ent);
890
891 p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
892
893 p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
894 p_mngr->last_free = (u64) p_t2->p_phys +
895 ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
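	/* first_free/last_free hold the physical addresses of the first and
	 * last searcher entries; the loop below chains every entry to its
	 * successor (crossing page boundaries) to build the free list the
	 * searcher hardware consumes.
	 */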
896
897 for (i = 0; i < p_mngr->t2_num_pages; i++) {
898 u32 ent_num = min_t(u32,
899 ent_per_page,
900 conn_num);
901 struct src_ent *entries = p_mngr->t2[i].p_virt;
902 u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
903 u32 j;
904
905 for (j = 0; j < ent_num - 1; j++) {
906 val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
907 entries[j].next = cpu_to_be64(val);
908 }
909
910 if (i < p_mngr->t2_num_pages - 1)
911 val = (u64) p_mngr->t2[i + 1].p_phys;
912 else
913 val = 0;
914 entries[j].next = cpu_to_be64(val);
915
Dan Carpenter01e517f2016-06-07 15:04:16 +0300916 conn_num -= ent_num;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300917 }
918
919 return 0;
920
921t2_fail:
922 qed_cxt_src_t2_free(p_hwfn);
923 return rc;
924}
925
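/* Iterate over all ILT clients, skipping the inactive ones. The inverted
 * if/else form keeps a user-supplied 'else' from binding to the macro's
 * internal 'if'.
 */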
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200926#define for_each_ilt_valid_client(pos, clients) \
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300927 for (pos = 0; pos < ILT_CLI_MAX; pos++) \
928 if (!clients[pos].active) { \
929 continue; \
930 } else \
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200931
932/* Total number of ILT lines used by this PF */
933static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
934{
935 u32 size = 0;
936 u32 i;
937
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300938 for_each_ilt_valid_client(i, ilt_clients)
939 size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200940
941 return size;
942}
943
944static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
945{
946 struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
947 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
948 u32 ilt_size, i;
949
950 ilt_size = qed_cxt_ilt_shadow_size(p_cli);
951
952 for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
953 struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
954
955 if (p_dma->p_virt)
956 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
957 p_dma->size, p_dma->p_virt,
958 p_dma->p_phys);
959 p_dma->p_virt = NULL;
960 }
961 kfree(p_mngr->ilt_shadow);
962}
963
964static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
965 struct qed_ilt_cli_blk *p_blk,
966 enum ilt_clients ilt_client,
967 u32 start_line_offset)
968{
969 struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300970 u32 lines, line, sz_left, lines_to_skip = 0;
971
972 /* Special handling for RoCE that supports dynamic allocation */
973 if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
974 ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
975 return 0;
976
977 lines_to_skip = p_blk->dynamic_line_cnt;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200978
979 if (!p_blk->total_size)
980 return 0;
981
982 sz_left = p_blk->total_size;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300983 lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200984 line = p_blk->start_line + start_line_offset -
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300985 p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200986
987 for (; lines; lines--) {
988 dma_addr_t p_phys;
989 void *p_virt;
990 u32 size;
991
Yuval Mintz1a635e42016-08-15 10:42:43 +0300992 size = min_t(u32, sz_left, p_blk->real_size_in_page);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200993 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300994 size, &p_phys, GFP_KERNEL);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200995 if (!p_virt)
996 return -ENOMEM;
997 memset(p_virt, 0, size);
998
999 ilt_shadow[line].p_phys = p_phys;
1000 ilt_shadow[line].p_virt = p_virt;
1001 ilt_shadow[line].size = size;
1002
1003 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1004 "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
1005 line, (u64)p_phys, p_virt, size);
1006
1007 sz_left -= size;
1008 line++;
1009 }
1010
1011 return 0;
1012}
1013
1014static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
1015{
1016 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1017 struct qed_ilt_client_cfg *clients = p_mngr->clients;
1018 struct qed_ilt_cli_blk *p_blk;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001019 u32 size, i, j, k;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001020 int rc;
1021
1022 size = qed_cxt_ilt_shadow_size(clients);
1023 p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
1024 GFP_KERNEL);
1025 if (!p_mngr->ilt_shadow) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001026 rc = -ENOMEM;
1027 goto ilt_shadow_fail;
1028 }
1029
1030 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1031 "Allocated 0x%x bytes for ilt shadow\n",
1032 (u32)(size * sizeof(struct qed_dma_mem)));
1033
1034 for_each_ilt_valid_client(i, clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001035 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
1036 p_blk = &clients[i].pf_blks[j];
1037 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001038 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001039 goto ilt_shadow_fail;
1040 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001041 for (k = 0; k < p_mngr->vf_count; k++) {
1042 for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
1043 u32 lines = clients[i].vf_total_lines * k;
1044
1045 p_blk = &clients[i].vf_blks[j];
1046 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001047 if (rc)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001048 goto ilt_shadow_fail;
1049 }
1050 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001051 }
1052
1053 return 0;
1054
1055ilt_shadow_fail:
1056 qed_ilt_shadow_free(p_hwfn);
1057 return rc;
1058}
1059
1060static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
1061{
1062 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1063 u32 type;
1064
1065 for (type = 0; type < MAX_CONN_TYPES; type++) {
1066 kfree(p_mngr->acquired[type].cid_map);
1067 p_mngr->acquired[type].max_count = 0;
1068 p_mngr->acquired[type].start_cid = 0;
1069 }
1070}
1071
1072static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
1073{
1074 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1075 u32 start_cid = 0;
1076 u32 type;
1077
1078 for (type = 0; type < MAX_CONN_TYPES; type++) {
1079 u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
1080 u32 size;
1081
1082 if (cid_cnt == 0)
1083 continue;
1084
1085 size = DIV_ROUND_UP(cid_cnt,
1086 sizeof(unsigned long) * BITS_PER_BYTE) *
1087 sizeof(unsigned long);
1088 p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
1089 if (!p_mngr->acquired[type].cid_map)
1090 goto cid_map_fail;
1091
1092 p_mngr->acquired[type].max_count = cid_cnt;
1093 p_mngr->acquired[type].start_cid = start_cid;
1094
1095 p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
1096
1097 DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1098 "Type %08x start: %08x count %08x\n",
1099 type, p_mngr->acquired[type].start_cid,
1100 p_mngr->acquired[type].max_count);
1101 start_cid += cid_cnt;
1102 }
1103
1104 return 0;
1105
1106cid_map_fail:
1107 qed_cid_map_free(p_hwfn);
1108 return -ENOMEM;
1109}
1110
1111int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
1112{
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001113 struct qed_ilt_client_cfg *clients;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001114 struct qed_cxt_mngr *p_mngr;
1115 u32 i;
1116
Yuval Mintz60fffb32016-02-21 11:40:07 +02001117 p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
Joe Perches2591c282016-09-04 14:24:03 -07001118 if (!p_mngr)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001119 return -ENOMEM;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001120
1121 /* Initialize ILT client registers */
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001122 clients = p_mngr->clients;
1123 clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
1124 clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
1125 clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001126
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001127 clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
1128 clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
1129 clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001130
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001131 clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
1132 clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
1133 clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
1134
1135 clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
1136 clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
1137 clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1138
1139 clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1140 clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
1141 clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1142
1143 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1144 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1145 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001146 /* default ILT page size for all clients is 64K */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001147 for (i = 0; i < ILT_CLI_MAX; i++)
1148 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1149
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001150 /* Initialize task sizes */
1151 p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
1152 p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
1153
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001154 if (p_hwfn->cdev->p_iov_info)
1155 p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001156 /* Initialize the dynamic ILT allocation mutex */
1157 mutex_init(&p_mngr->mutex);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001158
	/* Set the cxt manager pointer prior to further allocations */
1160 p_hwfn->p_cxt_mngr = p_mngr;
1161
1162 return 0;
1163}
1164
1165int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
1166{
1167 int rc;
1168
1169 /* Allocate the ILT shadow table */
1170 rc = qed_ilt_shadow_alloc(p_hwfn);
Joe Perches2591c282016-09-04 14:24:03 -07001171 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001172 goto tables_alloc_fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001173
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001174 /* Allocate the T2 table */
1175 rc = qed_cxt_src_t2_alloc(p_hwfn);
Joe Perches2591c282016-09-04 14:24:03 -07001176 if (rc)
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001177 goto tables_alloc_fail;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001178
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001179 /* Allocate and initialize the acquired cids bitmaps */
1180 rc = qed_cid_map_alloc(p_hwfn);
Joe Perches2591c282016-09-04 14:24:03 -07001181 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001182 goto tables_alloc_fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001183
1184 return 0;
1185
1186tables_alloc_fail:
1187 qed_cxt_mngr_free(p_hwfn);
1188 return rc;
1189}
1190
1191void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
1192{
1193 if (!p_hwfn->p_cxt_mngr)
1194 return;
1195
1196 qed_cid_map_free(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001197 qed_cxt_src_t2_free(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001198 qed_ilt_shadow_free(p_hwfn);
1199 kfree(p_hwfn->p_cxt_mngr);
1200
1201 p_hwfn->p_cxt_mngr = NULL;
1202}
1203
1204void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
1205{
1206 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1207 int type;
1208
1209 /* Reset acquired cids */
1210 for (type = 0; type < MAX_CONN_TYPES; type++) {
1211 u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
1212
1213 if (cid_cnt == 0)
1214 continue;
1215
1216 memset(p_mngr->acquired[type].cid_map, 0,
1217 DIV_ROUND_UP(cid_cnt,
1218 sizeof(unsigned long) * BITS_PER_BYTE) *
1219 sizeof(unsigned long));
1220 }
1221}
1222
1223/* CDU Common */
1224#define CDUC_CXT_SIZE_SHIFT \
1225 CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
1226
1227#define CDUC_CXT_SIZE_MASK \
1228 (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
1229
1230#define CDUC_BLOCK_WASTE_SHIFT \
1231 CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
1232
1233#define CDUC_BLOCK_WASTE_MASK \
1234 (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
1235
1236#define CDUC_NCIB_SHIFT \
1237 CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
1238
1239#define CDUC_NCIB_MASK \
1240 (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
1241
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001242#define CDUT_TYPE0_CXT_SIZE_SHIFT \
1243 CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1244
1245#define CDUT_TYPE0_CXT_SIZE_MASK \
1246 (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
1247 CDUT_TYPE0_CXT_SIZE_SHIFT)
1248
1249#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
1250 CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1251
1252#define CDUT_TYPE0_BLOCK_WASTE_MASK \
1253 (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
1254 CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1255
1256#define CDUT_TYPE0_NCIB_SHIFT \
1257 CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1258
1259#define CDUT_TYPE0_NCIB_MASK \
1260 (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
1261 CDUT_TYPE0_NCIB_SHIFT)
1262
1263#define CDUT_TYPE1_CXT_SIZE_SHIFT \
1264 CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1265
1266#define CDUT_TYPE1_CXT_SIZE_MASK \
1267 (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
1268 CDUT_TYPE1_CXT_SIZE_SHIFT)
1269
1270#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
1271 CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1272
1273#define CDUT_TYPE1_BLOCK_WASTE_MASK \
1274 (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
1275 CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1276
1277#define CDUT_TYPE1_NCIB_SHIFT \
1278 CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1279
1280#define CDUT_TYPE1_NCIB_MASK \
1281 (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
1282 CDUT_TYPE1_NCIB_SHIFT)
1283
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001284static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
1285{
1286 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1287
1288 /* CDUC - connection configuration */
1289 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1290 cxt_size = CONN_CXT_SIZE(p_hwfn);
1291 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1292 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1293
1294 SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1295 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1296 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1297 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001298
1299 /* CDUT - type-0 tasks configuration */
1300 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1301 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1302 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1303 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1304
	/* cxt size and block-waste are multiples of 8 */
1306 cdu_params = 0;
1307 SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1308 SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1309 SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1310 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1311
1312 /* CDUT - type-1 tasks configuration */
1313 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1314 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1315 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1316
	/* cxt size and block-waste are multiples of 8 */
1318 cdu_params = 0;
1319 SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1320 SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1321 SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1322 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1323}
1324
1325/* CDU PF */
1326#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1327#define CDU_SEG_REG_TYPE_MASK 0x1
1328#define CDU_SEG_REG_OFFSET_SHIFT 0
1329#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1330
1331static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1332{
1333 struct qed_ilt_client_cfg *p_cli;
1334 struct qed_tid_seg *p_seg;
1335 u32 cdu_seg_params, offset;
1336 int i;
1337
1338 static const u32 rt_type_offset_arr[] = {
1339 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1340 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1341 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1342 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1343 };
1344
1345 static const u32 rt_type_offset_fl_arr[] = {
1346 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1347 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1348 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1349 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1350 };
1351
1352 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1353
1354 /* There are initializations only for CDUT during pf Phase */
1355 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1356 /* Segment 0 */
1357 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1358 if (!p_seg)
1359 continue;
1360
1361 /* Note: start_line is already adjusted for the CDU
1362 * segment register granularity, so we just need to
1363 * divide. Adjustment is implicit as we assume ILT
1364 * Page size is larger than 32K!
1365 */
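		/* e.g. with the default 64K ILT pages and the 32K CDU segment
		 * alignment, the offset is simply twice the number of ILT
		 * lines from the client's first line.
		 */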
1366 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1367 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1368 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1369
1370 cdu_seg_params = 0;
1371 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1372 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1373 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1374
1375 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1376 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1377 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1378
1379 cdu_seg_params = 0;
1380 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1381 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1382 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1383 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001384}
1385
1386void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
1387{
1388 struct qed_qm_pf_rt_init_params params;
1389 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1390 struct qed_qm_iids iids;
1391
1392 memset(&iids, 0, sizeof(iids));
1393 qed_cxt_qm_iids(p_hwfn, &iids);
1394
1395 memset(&params, 0, sizeof(params));
1396 params.port_id = p_hwfn->port_id;
1397 params.pf_id = p_hwfn->rel_pf_id;
1398 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1399 params.is_first_pf = p_hwfn->first_on_engine;
1400 params.num_pf_cids = iids.cids;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001401 params.num_vf_cids = iids.vf_cids;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001402 params.start_pq = qm_info->start_pq;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001403 params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1404 params.num_vf_pqs = qm_info->num_vf_pqs;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001405 params.start_vport = qm_info->start_vport;
1406 params.num_vports = qm_info->num_vports;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001407 params.pf_wfq = qm_info->pf_wfq;
1408 params.pf_rl = qm_info->pf_rl;
1409 params.pq_params = qm_info->qm_pq_params;
1410 params.vport_params = qm_info->qm_vport_params;
1411
1412 qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
1413}
1414
1415/* CM PF */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001416void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001417{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001418 /* XCM pure-LB queue */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001419 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1420 qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001421}
1422
1423/* DQ PF */
1424static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1425{
1426 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001427 u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001428
1429 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1430 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1431
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001432 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1433 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1434
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001435 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1436 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1437
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001438 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1439 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1440
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001441 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1442 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1443
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001444 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1445 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1446
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001447 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1448 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1449
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001450 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1451 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1452
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001453 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1454 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1455
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001456 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1457 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1458
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001459 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1460 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001461
1462 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1463 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1464
	/* Connection types 6 & 7 are not in use, yet they must be configured
	 * as the highest possible connection. Not configuring them means the
	 * defaults will be used, and with a large number of cids a bug may
	 * occur if the defaults are smaller than dq_pf_max_cid /
	 * dq_vf_max_cid.
	 */
1471 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1472 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1473
1474 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1475 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001476}
1477
1478static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1479{
1480 struct qed_ilt_client_cfg *ilt_clients;
1481 int i;
1482
1483 ilt_clients = p_hwfn->p_cxt_mngr->clients;
1484 for_each_ilt_valid_client(i, ilt_clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001485 STORE_RT_REG(p_hwfn,
1486 ilt_clients[i].first.reg,
1487 ilt_clients[i].first.val);
1488 STORE_RT_REG(p_hwfn,
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001489 ilt_clients[i].last.reg, ilt_clients[i].last.val);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001490 STORE_RT_REG(p_hwfn,
1491 ilt_clients[i].p_size.reg,
1492 ilt_clients[i].p_size.val);
1493 }
1494}
1495
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001496static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1497{
1498 struct qed_ilt_client_cfg *p_cli;
1499 u32 blk_factor;
1500
1501	/* For simplicity we set the 'block' to be an ILT page */
1502 if (p_hwfn->cdev->p_iov_info) {
1503 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1504
1505 STORE_RT_REG(p_hwfn,
1506 PSWRQ2_REG_VF_BASE_RT_OFFSET,
1507 p_iov->first_vf_in_pf);
1508 STORE_RT_REG(p_hwfn,
1509 PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1510 p_iov->first_vf_in_pf + p_iov->total_vfs);
1511 }
1512
1513 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1514 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1515 if (p_cli->active) {
1516 STORE_RT_REG(p_hwfn,
1517 PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1518 blk_factor);
1519 STORE_RT_REG(p_hwfn,
1520 PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1521 p_cli->pf_total_lines);
1522 STORE_RT_REG(p_hwfn,
1523 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1524 p_cli->vf_total_lines);
1525 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001526
1527 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1528 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1529 if (p_cli->active) {
1530 STORE_RT_REG(p_hwfn,
1531 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1532 blk_factor);
1533 STORE_RT_REG(p_hwfn,
1534 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1535 p_cli->pf_total_lines);
1536 STORE_RT_REG(p_hwfn,
1537 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1538 p_cli->vf_total_lines);
1539 }
1540
1541 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1542 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1543 if (p_cli->active) {
1544 STORE_RT_REG(p_hwfn,
1545 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1546 STORE_RT_REG(p_hwfn,
1547 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1548 p_cli->pf_total_lines);
1549 STORE_RT_REG(p_hwfn,
1550 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1551 p_cli->vf_total_lines);
1552 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001553}
1554
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001555/* ILT (PSWRQ2) PF */
1556static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1557{
1558 struct qed_ilt_client_cfg *clients;
1559 struct qed_cxt_mngr *p_mngr;
1560 struct qed_dma_mem *p_shdw;
1561 u32 line, rt_offst, i;
1562
1563 qed_ilt_bounds_init(p_hwfn);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001564 qed_ilt_vf_bounds_init(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001565
1566 p_mngr = p_hwfn->p_cxt_mngr;
1567 p_shdw = p_mngr->ilt_shadow;
1568 clients = p_hwfn->p_cxt_mngr->clients;
1569
1570 for_each_ilt_valid_client(i, clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001571		/* Client's first val and RT array are absolute, while the ILT
1572		 * shadow's lines are relative.
1573		 */
1574 line = clients[i].first.val - p_mngr->pf_start_line;
1575 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1576 clients[i].first.val * ILT_ENTRY_IN_REGS;
1577
1578 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1579 line++, rt_offst += ILT_ENTRY_IN_REGS) {
1580 u64 ilt_hw_entry = 0;
1581
1582			/* p_virt could be NULL in case of dynamic
1583			 * allocation
1584			 */
1585 if (p_shdw[line].p_virt) {
1586 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1587 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1588 (p_shdw[line].p_phys >> 12));
1589
1590 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1591 "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1592 rt_offst, line, i,
1593 (u64)(p_shdw[line].p_phys >> 12));
1594 }
1595
1596 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1597 }
1598 }
1599}
1600
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001601/* SRC (Searcher) PF */
1602static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1603{
1604 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1605 u32 rounded_conn_num, conn_num, conn_max;
1606 struct qed_src_iids src_iids;
1607
1608 memset(&src_iids, 0, sizeof(src_iids));
1609 qed_cxt_src_iids(p_mngr, &src_iids);
1610 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1611 if (!conn_num)
1612 return;
1613
1614 conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1615 rounded_conn_num = roundup_pow_of_two(conn_max);
1616
1617 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1618 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1619 ilog2(rounded_conn_num));
1620
1621 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1622 p_hwfn->p_cxt_mngr->first_free);
1623 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1624 p_hwfn->p_cxt_mngr->last_free);
1625}
1626
1627/* Timers PF */
1628#define TM_CFG_NUM_IDS_SHIFT 0
1629#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
1630#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
1631#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
1632#define TM_CFG_PARENT_PF_SHIFT 25
1633#define TM_CFG_PARENT_PF_MASK 0x7ULL
1634
1635#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
1636#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
1637
1638#define TM_CFG_TID_OFFSET_SHIFT 30
1639#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
1640#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
1641#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
1642
1643static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1644{
1645 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1646 u32 active_seg_mask = 0, tm_offset, rt_reg;
1647 struct qed_tm_iids tm_iids;
1648 u64 cfg_word;
1649 u8 i;
1650
1651 memset(&tm_iids, 0, sizeof(tm_iids));
Michal Kalderon44531ba2017-04-03 12:21:10 +03001652 qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001653
1654 /* @@@TBD No pre-scan for now */
1655
1656 /* Note: We assume consecutive VFs for a PF */
1657 for (i = 0; i < p_mngr->vf_count; i++) {
1658 cfg_word = 0;
1659 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1660 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1661 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1662 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1663 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1664 (sizeof(cfg_word) / sizeof(u32)) *
1665 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1666 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1667 }
1668
1669 cfg_word = 0;
1670 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1671 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1672 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
1673 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1674
1675 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1676 (sizeof(cfg_word) / sizeof(u32)) *
1677 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1678 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1679
1680	/* enable scan */
1681 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1682 tm_iids.pf_cids ? 0x1 : 0x0);
1683
1684 /* @@@TBD how to enable the scan for the VFs */
1685
1686 tm_offset = tm_iids.per_vf_cids;
1687
1688 /* Note: We assume consecutive VFs for a PF */
1689 for (i = 0; i < p_mngr->vf_count; i++) {
1690 cfg_word = 0;
1691 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1692 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1693 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1694 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1695 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1696
1697 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1698 (sizeof(cfg_word) / sizeof(u32)) *
1699 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1700
1701 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1702 }
1703
1704 tm_offset = tm_iids.pf_cids;
1705 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1706 cfg_word = 0;
1707 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1708 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1709 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1710 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1711 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1712
1713 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1714 (sizeof(cfg_word) / sizeof(u32)) *
1715 (NUM_OF_VFS(p_hwfn->cdev) +
1716 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1717
1718 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001719 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001720
1721 tm_offset += tm_iids.pf_tids[i];
1722 }
1723
1724 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
1725 active_seg_mask = 0;
1726
1727 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1728
1729 /* @@@TBD how to enable the scan for the VFs */
1730}
1731
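/* Parser common-phase init: for an FCoE target PF, store 0 to the
 * PRS_REG_SEARCH_RESP_INITIATOR_TYPE runtime offset.
 */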
Arun Easi1e128c82017-02-15 06:28:22 -08001732static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1733{
1734 if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1735 p_hwfn->pf_params.fcoe_pf_params.is_target)
1736 STORE_RT_REG(p_hwfn,
1737 PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1738}
1739
1740static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1741{
1742 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1743 struct qed_conn_type_cfg *p_fcoe;
1744 struct qed_tid_seg *p_tid;
1745
1746 p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1747
1748 /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1749 if (!p_fcoe->cid_count)
1750 return;
1751
1752 p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1753 if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1754 STORE_RT_REG_AGG(p_hwfn,
1755 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1756 p_tid->count);
1757 } else {
1758 STORE_RT_REG_AGG(p_hwfn,
1759 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1760 p_tid->count);
1761 }
1762}
1763
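/* Write the context-management common-phase configuration (CDU and parser)
 * into the runtime array.
 */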
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001764void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1765{
1766 qed_cdu_init_common(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001767 qed_prs_init_common(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001768}
1769
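/* Write the PF-phase context configuration: QM, CM, DQ, CDU, ILT, searcher,
 * timers and parser runtime values for this PF.
 */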
1770void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
1771{
1772 qed_qm_init_pf(p_hwfn);
1773 qed_cm_init_pf(p_hwfn);
1774 qed_dq_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001775 qed_cdu_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001776 qed_ilt_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001777 qed_src_init_pf(p_hwfn);
1778 qed_tm_init_pf(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001779 qed_prs_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001780}
1781
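/* Acquire a free connection ID for the given protocol type: find the first
 * clear bit in the protocol's cid_map, mark it as used and return the
 * absolute CID in *p_cid.
 */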
1782int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001783 enum protocol_type type, u32 *p_cid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001784{
1785 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1786 u32 rel_cid;
1787
1788 if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
1789 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1790 return -EINVAL;
1791 }
1792
1793 rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
1794 p_mngr->acquired[type].max_count);
1795
1796 if (rel_cid >= p_mngr->acquired[type].max_count) {
Yuval Mintz1a635e42016-08-15 10:42:43 +03001797 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001798 return -EINVAL;
1799 }
1800
1801 __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
1802
1803 *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
1804
1805 return 0;
1806}
1807
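/* Check whether 'cid' falls inside one of the per-protocol acquired ranges
 * and is currently marked as acquired; the matching protocol is returned in
 * *p_type.
 */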
1808static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001809 u32 cid, enum protocol_type *p_type)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001810{
1811 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1812 struct qed_cid_acquired_map *p_map;
1813 enum protocol_type p;
1814 u32 rel_cid;
1815
1816 /* Iterate over protocols and find matching cid range */
1817 for (p = 0; p < MAX_CONN_TYPES; p++) {
1818 p_map = &p_mngr->acquired[p];
1819
1820 if (!p_map->cid_map)
1821 continue;
1822 if (cid >= p_map->start_cid &&
1823 cid < p_map->start_cid + p_map->max_count)
1824 break;
1825 }
1826 *p_type = p;
1827
1828 if (p == MAX_CONN_TYPES) {
1829 DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
1830 return false;
1831 }
1832
1833 rel_cid = cid - p_map->start_cid;
1834 if (!test_bit(rel_cid, p_map->cid_map)) {
1835 DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
1836 return false;
1837 }
1838 return true;
1839}
1840
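/* Release a previously acquired CID by clearing its bit in the owning
 * protocol's cid_map.
 */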
Yuval Mintz1a635e42016-08-15 10:42:43 +03001841void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001842{
1843 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1844 enum protocol_type type;
1845 bool b_acquired;
1846 u32 rel_cid;
1847
1848 /* Test acquired and find matching per-protocol map */
1849 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
1850
1851 if (!b_acquired)
1852 return;
1853
1854 rel_cid = cid - p_mngr->acquired[type].start_cid;
1855 __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
1856}
1857
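/* Fill p_info with the protocol type and a virtual pointer to the connection
 * context of p_info->iid, computed from the CDUC ILT shadow.
 */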
Yuval Mintz1a635e42016-08-15 10:42:43 +03001858int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001859{
1860 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1861 u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1862 enum protocol_type type;
1863 bool b_acquired;
1864
1865 /* Test acquired and find matching per-protocol map */
1866 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
1867
1868 if (!b_acquired)
1869 return -EINVAL;
1870
1871	/* set the protocol type */
1872 p_info->type = type;
1873
1874 /* compute context virtual pointer */
1875 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1876
1877 conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1878 cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1879 line = p_info->iid / cxts_per_p;
1880
1881 /* Make sure context is allocated (dynamic allocation) */
1882 if (!p_mngr->ilt_shadow[line].p_virt)
1883 return -EINVAL;
1884
1885 p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
1886 p_info->iid % cxts_per_p * conn_cxt_size;
1887
1888 DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
1889 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1890 p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
1891
1892 return 0;
1893}
1894
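/* Derive the RoCE connection, task and SRQ counts from the user-supplied
 * rdma_pf_params and register them with the context manager.
 */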
Yuval Mintz8c93bea2016-10-13 22:57:03 +03001895static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1896 struct qed_rdma_pf_params *p_params)
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001897{
1898 u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
1899 enum protocol_type proto;
1900
1901 num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
1902 num_tasks = num_mrs; /* each mr uses a single task id */
1903 num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
1904
1905 switch (p_hwfn->hw_info.personality) {
1906 case QED_PCI_ETH_ROCE:
1907 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
1908 num_cons = num_qps * 2; /* each QP requires two connections */
1909 proto = PROTOCOLID_ROCE;
1910 break;
1911 default:
1912 return;
1913 }
1914
1915 if (num_cons && num_tasks) {
1916 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
1917
1918		/* Deliberately passing ROCE for the task id. This is because
1919		 * iWARP / RoCE share the task id.
1920		 */
1921 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
1922 QED_CXT_ROCE_TID_SEG, 1,
1923 num_tasks, false);
1924 qed_cxt_set_srq_count(p_hwfn, num_srqs);
1925 } else {
1926 DP_INFO(p_hwfn->cdev,
1927 "RDMA personality used without setting params!\n");
1928 }
1929}
1930
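/* Set the per-protocol CID/TID requirements of this PF according to its
 * personality and the provided pf_params.
 */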
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001931int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
1932{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001933 /* Set the number of required CORE connections */
1934 u32 core_cids = 1; /* SPQ */
1935
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001936 if (p_hwfn->using_ll2)
1937 core_cids += 4;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001938 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001939
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001940 switch (p_hwfn->hw_info.personality) {
1941 case QED_PCI_ETH_ROCE:
1942 {
1943 qed_rdma_set_pf_params(p_hwfn,
1944 &p_hwfn->
1945 pf_params.rdma_pf_params);
1946		/* no need for break since RoCE coexists with Ethernet */
1947 }
1948 case QED_PCI_ETH:
1949 {
1950 struct qed_eth_pf_params *p_params =
1951 &p_hwfn->pf_params.eth_pf_params;
1952
1953 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
1954 p_params->num_cons, 1);
1955 break;
1956 }
Arun Easi1e128c82017-02-15 06:28:22 -08001957 case QED_PCI_FCOE:
1958 {
1959 struct qed_fcoe_pf_params *p_params;
1960
1961 p_params = &p_hwfn->pf_params.fcoe_pf_params;
1962
1963 if (p_params->num_cons && p_params->num_tasks) {
1964 qed_cxt_set_proto_cid_count(p_hwfn,
1965 PROTOCOLID_FCOE,
1966 p_params->num_cons,
1967 0);
1968
1969 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
1970 QED_CXT_FCOE_TID_SEG, 0,
1971 p_params->num_tasks, true);
1972 } else {
1973 DP_INFO(p_hwfn->cdev,
1974 "Fcoe personality used without setting params!\n");
1975 }
1976 break;
1977 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001978 case QED_PCI_ISCSI:
1979 {
1980 struct qed_iscsi_pf_params *p_params;
1981
1982 p_params = &p_hwfn->pf_params.iscsi_pf_params;
1983
1984 if (p_params->num_cons && p_params->num_tasks) {
1985 qed_cxt_set_proto_cid_count(p_hwfn,
1986 PROTOCOLID_ISCSI,
1987 p_params->num_cons,
1988 0);
1989
1990 qed_cxt_set_proto_tid_count(p_hwfn,
1991 PROTOCOLID_ISCSI,
1992 QED_CXT_ISCSI_TID_SEG,
1993 0,
1994 p_params->num_tasks,
1995 true);
1996 } else {
1997 DP_INFO(p_hwfn->cdev,
1998 "Iscsi personality used without setting params!\n");
1999 }
2000 break;
2001 }
2002 default:
2003 return -EINVAL;
2004 }
2005
2006 return 0;
2007}
2008
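/* Report the TID memory layout for the PF's FCoE/iSCSI task segment: the
 * virtual address of each ILT block, the wasted bytes per page, the TID size
 * and the number of TIDs per block.
 */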
2009int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2010 struct qed_tid_mem *p_info)
2011{
2012 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2013 u32 proto, seg, total_lines, i, shadow_line;
2014 struct qed_ilt_client_cfg *p_cli;
2015 struct qed_ilt_cli_blk *p_fl_seg;
2016 struct qed_tid_seg *p_seg_info;
2017
2018 /* Verify the personality */
2019 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002020 case QED_PCI_FCOE:
2021 proto = PROTOCOLID_FCOE;
2022 seg = QED_CXT_FCOE_TID_SEG;
2023 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002024 case QED_PCI_ISCSI:
2025 proto = PROTOCOLID_ISCSI;
2026 seg = QED_CXT_ISCSI_TID_SEG;
2027 break;
2028 default:
2029 return -EINVAL;
2030 }
2031
2032 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2033 if (!p_cli->active)
2034 return -EINVAL;
2035
2036 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2037 if (!p_seg_info->has_fl_mem)
2038 return -EINVAL;
2039
2040 p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2041 total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2042 p_fl_seg->real_size_in_page);
2043
2044 for (i = 0; i < total_lines; i++) {
2045 shadow_line = i + p_fl_seg->start_line -
2046 p_hwfn->p_cxt_mngr->pf_start_line;
2047 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
2048 }
2049 p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2050 p_fl_seg->real_size_in_page;
2051 p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2052 p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2053 p_info->tid_size;
2054
2055 return 0;
2056}
2057
2058/* This function is very RoCE oriented; if another protocol wants this
2059 * feature in the future, we will need to make the function more generic.
2060 */
2061int
2062qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2063 enum qed_cxt_elem_type elem_type, u32 iid)
2064{
2065 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2066 struct qed_ilt_client_cfg *p_cli;
2067 struct qed_ilt_cli_blk *p_blk;
2068 struct qed_ptt *p_ptt;
2069 dma_addr_t p_phys;
2070 u64 ilt_hw_entry;
2071 void *p_virt;
2072 int rc = 0;
2073
2074 switch (elem_type) {
2075 case QED_ELEM_CXT:
2076 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2077 elem_size = CONN_CXT_SIZE(p_hwfn);
2078 p_blk = &p_cli->pf_blks[CDUC_BLK];
2079 break;
2080 case QED_ELEM_SRQ:
2081 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2082 elem_size = SRQ_CXT_SIZE;
2083 p_blk = &p_cli->pf_blks[SRQ_BLK];
2084 break;
2085 case QED_ELEM_TASK:
2086 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2087 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2088 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2089 break;
2090 default:
2091 DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
2092 return -EINVAL;
2093 }
2094
2095 /* Calculate line in ilt */
2096 hw_p_size = p_cli->p_size.val;
2097 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2098 line = p_blk->start_line + (iid / elems_per_p);
2099 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2100
2101 /* If line is already allocated, do nothing, otherwise allocate it and
2102 * write it to the PSWRQ2 registers.
2103 * This section can be run in parallel from different contexts and thus
2104 * a mutex protection is needed.
2105 */
2106
2107 mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2108
2109 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2110 goto out0;
2111
2112 p_ptt = qed_ptt_acquire(p_hwfn);
2113 if (!p_ptt) {
2114 DP_NOTICE(p_hwfn,
2115 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2116 rc = -EBUSY;
2117 goto out0;
2118 }
2119
2120 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2121 p_blk->real_size_in_page,
2122 &p_phys, GFP_KERNEL);
2123 if (!p_virt) {
2124 rc = -ENOMEM;
2125 goto out1;
2126 }
2127 memset(p_virt, 0, p_blk->real_size_in_page);
2128
2129 /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2130 * to compensate for a HW bug, but it is configured even if DIF is not
2131 * enabled. This is harmless and allows us to avoid a dedicated API. We
2132 * configure the field for all of the contexts on the newly allocated
2133 * page.
2134 */
2135 if (elem_type == QED_ELEM_TASK) {
2136 u32 elem_i;
2137 u8 *elem_start = (u8 *)p_virt;
2138 union type1_task_context *elem;
2139
2140 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2141 elem = (union type1_task_context *)elem_start;
2142 SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2143 TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
2144 elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2145 }
2146 }
2147
2148 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2149 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2150 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2151 p_blk->real_size_in_page;
2152
2153 /* compute absolute offset */
2154 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2155 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2156
2157 ilt_hw_entry = 0;
2158 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2159 SET_FIELD(ilt_hw_entry,
2160 ILT_ENTRY_PHY_ADDR,
2161 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2162
2163 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2164 qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2165 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2166
2167 if (elem_type == QED_ELEM_CXT) {
2168 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2169 elems_per_p;
2170
2171 /* Update the relevant register in the parser */
2172 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2173 last_cid_allocated - 1);
2174
2175 if (!p_hwfn->b_rdma_enabled_in_prs) {
2176 /* Enable RoCE search */
2177 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2178 p_hwfn->b_rdma_enabled_in_prs = true;
2179 }
2180 }
2181
2182out1:
2183 qed_ptt_release(p_hwfn, p_ptt);
2184out0:
2185 mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2186
2187 return rc;
2188}
2189
2190/* This function is very RoCE oriented; if another protocol wants this
2191 * feature in the future, we will need to make the function more generic.
2192 */
2193static int
2194qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2195 enum qed_cxt_elem_type elem_type,
2196 u32 start_iid, u32 count)
2197{
2198 u32 start_line, end_line, shadow_start_line, shadow_end_line;
2199 u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2200 struct qed_ilt_client_cfg *p_cli;
2201 struct qed_ilt_cli_blk *p_blk;
2202 u32 end_iid = start_iid + count;
2203 struct qed_ptt *p_ptt;
2204 u64 ilt_hw_entry = 0;
2205 u32 i;
2206
2207 switch (elem_type) {
2208 case QED_ELEM_CXT:
2209 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2210 elem_size = CONN_CXT_SIZE(p_hwfn);
2211 p_blk = &p_cli->pf_blks[CDUC_BLK];
2212 break;
2213 case QED_ELEM_SRQ:
2214 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2215 elem_size = SRQ_CXT_SIZE;
2216 p_blk = &p_cli->pf_blks[SRQ_BLK];
2217 break;
2218 case QED_ELEM_TASK:
2219 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2220 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2221 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2222 break;
2223 default:
2224 DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
2225 return -EINVAL;
2226 }
2227
2228 /* Calculate line in ilt */
2229 hw_p_size = p_cli->p_size.val;
2230 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2231 start_line = p_blk->start_line + (start_iid / elems_per_p);
2232 end_line = p_blk->start_line + (end_iid / elems_per_p);
2233 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2234 end_line--;
2235
2236 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2237 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2238
2239 p_ptt = qed_ptt_acquire(p_hwfn);
2240 if (!p_ptt) {
2241 DP_NOTICE(p_hwfn,
2242 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2243 return -EBUSY;
2244 }
2245
2246 for (i = shadow_start_line; i < shadow_end_line; i++) {
2247 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2248 continue;
2249
2250 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2251 p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2252 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2253 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2254
2255 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2256 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2257 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2258
2259 /* compute absolute offset */
2260 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2261 ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2262 ILT_ENTRY_IN_REGS);
2263
2264 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2265 * wide-bus.
2266 */
2267 qed_dmae_host2grc(p_hwfn, p_ptt,
2268 (u64) (uintptr_t) &ilt_hw_entry,
2269 reg_offset,
2270 sizeof(ilt_hw_entry) / sizeof(u32),
2271 0);
2272 }
2273
2274 qed_ptt_release(p_hwfn, p_ptt);
2275
2276 return 0;
2277}
2278
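/* Free all dynamically allocated ILT memory of the given protocol: connection
 * contexts, task contexts and SRQ contexts.
 */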
2279int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2280{
2281 int rc;
2282 u32 cid;
2283
2284 /* Free Connection CXT */
2285 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2286 qed_cxt_get_proto_cid_start(p_hwfn,
2287 proto),
2288 qed_cxt_get_proto_cid_count(p_hwfn,
2289 proto, &cid));
2290
2291 if (rc)
2292 return rc;
2293
2294 /* Free Task CXT */
2295 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2296 qed_cxt_get_proto_tid_count(p_hwfn, proto));
2297 if (rc)
2298 return rc;
2299
2300 /* Free TSDM CXT */
2301 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2302 qed_cxt_get_srq_count(p_hwfn));
2303
2304 return rc;
2305}
2306
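/* Return in *pp_task_ctx a pointer to the task context of 'tid', taken from
 * either the working-memory or the fl-memory CDUT block, depending on
 * ctx_type.
 */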
2307int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2308 u32 tid, u8 ctx_type, void **pp_task_ctx)
2309{
2310 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2311 struct qed_ilt_client_cfg *p_cli;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002312 struct qed_tid_seg *p_seg_info;
Arun Easi1e128c82017-02-15 06:28:22 -08002313 struct qed_ilt_cli_blk *p_seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002314 u32 num_tids_per_block;
Arun Easi1e128c82017-02-15 06:28:22 -08002315 u32 tid_size, ilt_idx;
2316 u32 total_lines;
2317 u32 proto, seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002318
2319 /* Verify the personality */
2320 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002321 case QED_PCI_FCOE:
2322 proto = PROTOCOLID_FCOE;
2323 seg = QED_CXT_FCOE_TID_SEG;
2324 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002325 case QED_PCI_ISCSI:
2326 proto = PROTOCOLID_ISCSI;
2327 seg = QED_CXT_ISCSI_TID_SEG;
2328 break;
2329 default:
2330 return -EINVAL;
2331 }
2332
2333 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2334 if (!p_cli->active)
2335 return -EINVAL;
2336
2337 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2338
2339 if (ctx_type == QED_CTX_WORKING_MEM) {
2340 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2341 } else if (ctx_type == QED_CTX_FL_MEM) {
2342 if (!p_seg_info->has_fl_mem)
2343 return -EINVAL;
2344 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2345 } else {
2346 return -EINVAL;
2347 }
2348 total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2349 tid_size = p_mngr->task_type_size[p_seg_info->type];
2350 num_tids_per_block = p_seg->real_size_in_page / tid_size;
2351
2352 if (total_lines < tid / num_tids_per_block)
2353 return -EINVAL;
2354
2355 ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2356 p_mngr->pf_start_line;
2357 *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2358 (tid % num_tids_per_block) * tid_size;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002359
2360 return 0;
2361}