Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001/* QLogic qed NIC Driver
Mintz, Yuvale8f1cb52017-01-01 13:57:00 +02002 * Copyright (c) 2015-2017 QLogic Corporation
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02003 *
Mintz, Yuvale8f1cb52017-01-01 13:57:00 +02004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020031 */
32
33#include <linux/types.h>
34#include <linux/bitops.h>
35#include <linux/dma-mapping.h>
36#include <linux/errno.h>
37#include <linux/kernel.h>
38#include <linux/list.h>
39#include <linux/log2.h>
40#include <linux/pci.h>
41#include <linux/slab.h>
42#include <linux/string.h>
44#include "qed.h"
45#include "qed_cxt.h"
46#include "qed_dev_api.h"
47#include "qed_hsi.h"
48#include "qed_hw.h"
49#include "qed_init_ops.h"
50#include "qed_reg_addr.h"
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030051#include "qed_sriov.h"
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020052
53/* Max number of connection types in HW (DQ/CDU etc.) */
54#define MAX_CONN_TYPES PROTOCOLID_COMMON
55#define NUM_TASK_TYPES 2
56#define NUM_TASK_PF_SEGMENTS 4
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030057#define NUM_TASK_VF_SEGMENTS 1
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020058
59/* QM constants */
60#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
61
62/* Doorbell-Queue constants */
63#define DQ_RANGE_SHIFT 4
64#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
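/* Note: per-protocol CID counts are rounded up to DQ_RANGE_ALIGN (16) in
 * qed_cxt_set_proto_cid_count(), matching the DQ_RANGE_SHIFT granularity
 * used for the DORQ max-ICID runtime registers in qed_dq_init_pf().
 */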
65
Yuval Mintzdbb799c2016-06-03 14:35:35 +030066/* Searcher constants */
67#define SRC_MIN_NUM_ELEMS 256
68
69/* Timers constants */
70#define TM_SHIFT 7
71#define TM_ALIGN BIT(TM_SHIFT)
72#define TM_ELEM_SIZE 4
73
Mintz, Yuvalbe086e72017-03-11 18:39:18 +020074#define ILT_DEFAULT_HW_P_SIZE 4
Ram Amrani51ff1722016-10-01 21:59:57 +030075
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020076#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
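/* Note: with the default ILT_DEFAULT_HW_P_SIZE of 4,
 * ILT_PAGE_IN_BYTES(4) = 1 << (4 + 12) = 64K, i.e. the default 64K ILT
 * page size mentioned in qed_cxt_mngr_alloc().
 */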
78
79/* ILT entry structure */
80#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
81#define ILT_ENTRY_PHY_ADDR_SHIFT 0
82#define ILT_ENTRY_VALID_MASK 0x1ULL
83#define ILT_ENTRY_VALID_SHIFT 52
84#define ILT_ENTRY_IN_REGS 2
85#define ILT_REG_SIZE_IN_BYTES 4
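/* Note: an ILT entry is a 64-bit value carrying the page physical address
 * shifted right by 12 in its low bits and a valid flag at bit 52; it is
 * written to the runtime array as ILT_ENTRY_IN_REGS (two) 32-bit registers -
 * see qed_ilt_init_pf().
 */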
86
87/* connection context union */
88union conn_context {
89 struct core_conn_context core_ctx;
90 struct eth_conn_context eth_ctx;
Yuval Mintzdbb799c2016-06-03 14:35:35 +030091 struct iscsi_conn_context iscsi_ctx;
Arun Easi1e128c82017-02-15 06:28:22 -080092 struct fcoe_conn_context fcoe_ctx;
Yuval Mintzdbb799c2016-06-03 14:35:35 +030093 struct roce_conn_context roce_ctx;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020094};
95
Arun Easi1e128c82017-02-15 06:28:22 -080096/* TYPE-0 task context - iSCSI, FCOE */
Yuval Mintzdbb799c2016-06-03 14:35:35 +030097union type0_task_context {
98 struct iscsi_task_context iscsi_ctx;
Arun Easi1e128c82017-02-15 06:28:22 -080099 struct fcoe_task_context fcoe_ctx;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300100};
101
102/* TYPE-1 task context - ROCE */
103union type1_task_context {
104 struct rdma_task_context roce_ctx;
105};
106
107struct src_ent {
108 u8 opaque[56];
109 u64 next;
110};
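/* Note: each searcher (T2) entry is 64 bytes - 56 opaque bytes plus a 64-bit
 * 'next' pointer; qed_cxt_src_t2_alloc() chains the entries, and the pages
 * holding them, into a single physically-linked list bounded by
 * first_free/last_free.
 */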
111
112#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
113#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
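/* Note: CDUT segments are thus aligned to 1 << (3 + 12) = 32K bytes;
 * qed_cdu_init_pf() assumes the ILT page size is larger than this when it
 * converts segment start lines into CDU offset-register values.
 */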
114
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200115#define CONN_CXT_SIZE(p_hwfn) \
116 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
117
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300118#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
119
120#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
121 ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
122
123/* Alignment is inherent to the type1_task_context structure */
124#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
125
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200126/* PF per protocol configuration object */
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300127#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
128#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
129
130struct qed_tid_seg {
131 u32 count;
132 u8 type;
133 bool has_fl_mem;
134};
135
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200136struct qed_conn_type_cfg {
137 u32 cid_count;
138 u32 cid_start;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300139 u32 cids_per_vf;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300140 struct qed_tid_seg tid_seg[TASK_SEGMENTS];
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200141};
142
 143/* ILT client configuration, per connection type (protocol) resources. */
144#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300145#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200146#define CDUC_BLK (0)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300147#define SRQ_BLK (0)
148#define CDUT_SEG_BLK(n) (1 + (u8)(n))
149#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
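/* Note: the "1 + segments * 2" block counts above size the per-client block
 * arrays for one base block (e.g. CDUC connection contexts) plus, per task
 * segment, one 'working' CDUT block and one forced-load ('init') block -
 * see qed_cxt_cfg_ilt_compute().
 */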
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200150
151enum ilt_clients {
152 ILT_CLI_CDUC,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300153 ILT_CLI_CDUT,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200154 ILT_CLI_QM,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300155 ILT_CLI_TM,
156 ILT_CLI_SRC,
157 ILT_CLI_TSDM,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200158 ILT_CLI_MAX
159};
160
161struct ilt_cfg_pair {
162 u32 reg;
163 u32 val;
164};
165
166struct qed_ilt_cli_blk {
167 u32 total_size; /* 0 means not active */
168 u32 real_size_in_page;
169 u32 start_line;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300170 u32 dynamic_line_cnt;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200171};
172
173struct qed_ilt_client_cfg {
174 bool active;
175
176 /* ILT boundaries */
177 struct ilt_cfg_pair first;
178 struct ilt_cfg_pair last;
179 struct ilt_cfg_pair p_size;
180
181 /* ILT client blocks for PF */
182 struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
183 u32 pf_total_lines;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300184
185 /* ILT client blocks for VFs */
186 struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
187 u32 vf_total_lines;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200188};
189
190/* Per Path -
191 * ILT shadow table
192 * Protocol acquired CID lists
193 * PF start line in ILT
194 */
195struct qed_dma_mem {
196 dma_addr_t p_phys;
197 void *p_virt;
198 size_t size;
199};
200
201struct qed_cid_acquired_map {
202 u32 start_cid;
203 u32 max_count;
204 unsigned long *cid_map;
205};
206
207struct qed_cxt_mngr {
 208 /* Per protocol configuration */
209 struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
210
211 /* computed ILT structure */
212 struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
213
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300214 /* Task type sizes */
215 u32 task_type_size[NUM_TASK_TYPES];
216
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300217 /* total number of VFs for this hwfn -
218 * ALL VFs are symmetric in terms of HW resources
219 */
220 u32 vf_count;
221
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300222 /* total number of SRQs for this hwfn */
223 u32 srq_count;
224
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200225 /* Acquired CIDs */
226 struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
227
228 /* ILT shadow table */
229 struct qed_dma_mem *ilt_shadow;
230 u32 pf_start_line;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300231
232 /* Mutex for a dynamic ILT allocation */
233 struct mutex mutex;
234
235 /* SRC T2 */
236 struct qed_dma_mem *t2;
237 u32 t2_num_pages;
238 u64 first_free;
239 u64 last_free;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200240};
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300241static bool src_proto(enum protocol_type type)
242{
243 return type == PROTOCOLID_ISCSI ||
Arun Easi1e128c82017-02-15 06:28:22 -0800244 type == PROTOCOLID_FCOE ||
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300245 type == PROTOCOLID_ROCE;
246}
247
248static bool tm_cid_proto(enum protocol_type type)
249{
250 return type == PROTOCOLID_ISCSI ||
Arun Easi1e128c82017-02-15 06:28:22 -0800251 type == PROTOCOLID_FCOE ||
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300252 type == PROTOCOLID_ROCE;
253}
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200254
Arun Easi1e128c82017-02-15 06:28:22 -0800255static bool tm_tid_proto(enum protocol_type type)
256{
257 return type == PROTOCOLID_FCOE;
258}
259
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300260/* counts the iids for the CDU/CDUC ILT client configuration */
261struct qed_cdu_iids {
262 u32 pf_cids;
263 u32 per_vf_cids;
264};
265
266static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
267 struct qed_cdu_iids *iids)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200268{
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300269 u32 type;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200270
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300271 for (type = 0; type < MAX_CONN_TYPES; type++) {
272 iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
273 iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
274 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200275}
276
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300277/* counts the iids for the Searcher block configuration */
278struct qed_src_iids {
279 u32 pf_cids;
280 u32 per_vf_cids;
281};
282
283static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
284 struct qed_src_iids *iids)
285{
286 u32 i;
287
288 for (i = 0; i < MAX_CONN_TYPES; i++) {
289 if (!src_proto(i))
290 continue;
291
292 iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
293 iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
294 }
295}
296
297/* counts the iids for the Timers block configuration */
298struct qed_tm_iids {
299 u32 pf_cids;
300 u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
301 u32 pf_tids_total;
302 u32 per_vf_cids;
303 u32 per_vf_tids;
304};
305
Michal Kalderon44531ba2017-04-03 12:21:10 +0300306static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
307 struct qed_cxt_mngr *p_mngr,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300308 struct qed_tm_iids *iids)
309{
Michal Kalderon44531ba2017-04-03 12:21:10 +0300310 bool tm_vf_required = false;
311 bool tm_required = false;
312 int i, j;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300313
Michal Kalderon44531ba2017-04-03 12:21:10 +0300314 /* Timers is a special case -> we don't count how many cids require
315 * timers but what's the max cid that will be used by the timer block.
 316 * Therefore we traverse in reverse order, and once we hit a protocol
317 * that requires the timers memory, we'll sum all the protocols up
318 * to that one.
319 */
320 for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300321 struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
322
Michal Kalderon44531ba2017-04-03 12:21:10 +0300323 if (tm_cid_proto(i) || tm_required) {
324 if (p_cfg->cid_count)
325 tm_required = true;
326
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300327 iids->pf_cids += p_cfg->cid_count;
Michal Kalderon44531ba2017-04-03 12:21:10 +0300328 }
329
330 if (tm_cid_proto(i) || tm_vf_required) {
331 if (p_cfg->cids_per_vf)
332 tm_vf_required = true;
333
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300334 iids->per_vf_cids += p_cfg->cids_per_vf;
335 }
Arun Easi1e128c82017-02-15 06:28:22 -0800336
337 if (tm_tid_proto(i)) {
338 struct qed_tid_seg *segs = p_cfg->tid_seg;
339
340 /* for each segment there is at most one
341 * protocol for which count is not 0.
342 */
343 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
344 iids->pf_tids[j] += segs[j].count;
345
 346 /* The last array element is for the VFs. As for PF
347 * segments there can be only one protocol for
348 * which this value is not 0.
349 */
350 iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
351 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300352 }
353
354 iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
355 iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
356 iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
357
358 for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
359 iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
360 iids->pf_tids_total += iids->pf_tids[j];
361 }
362}
363
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200364static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
365 struct qed_qm_iids *iids)
366{
367 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300368 struct qed_tid_seg *segs;
369 u32 vf_cids = 0, type, j;
370 u32 vf_tids = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200371
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300372 for (type = 0; type < MAX_CONN_TYPES; type++) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200373 iids->cids += p_mngr->conn_cfg[type].cid_count;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300374 vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300375
376 segs = p_mngr->conn_cfg[type].tid_seg;
377 /* for each segment there is at most one
378 * protocol for which count is not 0.
379 */
380 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
381 iids->tids += segs[j].count;
382
 383 /* The last array element is for the VFs. As for PF
384 * segments there can be only one protocol for
385 * which this value is not 0.
386 */
387 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300388 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200389
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300390 iids->vf_cids += vf_cids * p_mngr->vf_count;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300391 iids->tids += vf_tids * p_mngr->vf_count;
392
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300393 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300394 "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
395 iids->cids, iids->vf_cids, iids->tids, vf_tids);
396}
397
398static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
399 u32 seg)
400{
401 struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
402 u32 i;
403
404 /* Find the protocol with tid count > 0 for this segment.
405 * Note: there can only be one and this is already validated.
406 */
407 for (i = 0; i < MAX_CONN_TYPES; i++)
408 if (p_cfg->conn_cfg[i].tid_seg[seg].count)
409 return &p_cfg->conn_cfg[i].tid_seg[seg];
410 return NULL;
411}
412
Yuval Mintz8c93bea2016-10-13 22:57:03 +0300413static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300414{
415 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
416
417 p_mgr->srq_count = num_srqs;
418}
419
Yuval Mintz8c93bea2016-10-13 22:57:03 +0300420static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300421{
422 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
423
424 return p_mgr->srq_count;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200425}
426
427/* set the iids count per protocol */
428static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
429 enum protocol_type type,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300430 u32 cid_count, u32 vf_cid_cnt)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200431{
432 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
433 struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
434
435 p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300436 p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300437
438 if (type == PROTOCOLID_ROCE) {
439 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
440 u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
441 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
Ram Amranif3e48112017-03-14 15:25:58 +0200442 u32 align = elems_per_page * DQ_RANGE_ALIGN;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300443
Ram Amranif3e48112017-03-14 15:25:58 +0200444 p_conn->cid_count = roundup(p_conn->cid_count, align);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300445 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300446}
447
Yuval Mintz1a635e42016-08-15 10:42:43 +0300448u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
449 enum protocol_type type, u32 *vf_cid)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300450{
451 if (vf_cid)
452 *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
453
454 return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200455}
456
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300457u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
458 enum protocol_type type)
459{
460 return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
461}
462
463u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
464 enum protocol_type type)
465{
466 u32 cnt = 0;
467 int i;
468
469 for (i = 0; i < TASK_SEGMENTS; i++)
470 cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
471
472 return cnt;
473}
474
Yuval Mintz1a635e42016-08-15 10:42:43 +0300475static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
476 enum protocol_type proto,
477 u8 seg,
478 u8 seg_type, u32 count, bool has_fl)
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300479{
480 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
481 struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
482
483 p_seg->count = count;
484 p_seg->has_fl_mem = has_fl;
485 p_seg->type = seg_type;
486}
487
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200488static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
489 struct qed_ilt_cli_blk *p_blk,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300490 u32 start_line, u32 total_size, u32 elem_size)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200491{
492 u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
493
 494 /* verify that it's called only once for each block */
495 if (p_blk->total_size)
496 return;
497
498 p_blk->total_size = total_size;
499 p_blk->real_size_in_page = 0;
500 if (elem_size)
501 p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
502 p_blk->start_line = start_line;
503}
504
505static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
506 struct qed_ilt_client_cfg *p_cli,
507 struct qed_ilt_cli_blk *p_blk,
508 u32 *p_line, enum ilt_clients client_id)
509{
510 if (!p_blk->total_size)
511 return;
512
513 if (!p_cli->active)
514 p_cli->first.val = *p_line;
515
516 p_cli->active = true;
Yuval Mintz1a635e42016-08-15 10:42:43 +0300517 *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200518 p_cli->last.val = *p_line - 1;
519
520 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
521 "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
522 client_id, p_cli->first.val,
523 p_cli->last.val, p_blk->total_size,
524 p_blk->real_size_in_page, p_blk->start_line);
525}
526
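/* Note: for RoCE, CDUC ILT pages can be allocated dynamically (see the
 * 'dynamic ILT allocation' mutex in struct qed_cxt_mngr), so the count
 * returned here is skipped by qed_ilt_blk_alloc() when the static ILT
 * shadow is allocated.
 */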
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300527static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
528 enum ilt_clients ilt_client)
529{
530 u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
531 struct qed_ilt_client_cfg *p_cli;
532 u32 lines_to_skip = 0;
533 u32 cxts_per_p;
534
535 if (ilt_client == ILT_CLI_CDUC) {
536 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
537
538 cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
539 (u32) CONN_CXT_SIZE(p_hwfn);
540
541 lines_to_skip = cid_count / cxts_per_p;
542 }
543
544 return lines_to_skip;
545}
546
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200547int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
548{
549 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300550 u32 curr_line, total, i, task_size, line;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200551 struct qed_ilt_client_cfg *p_cli;
552 struct qed_ilt_cli_blk *p_blk;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300553 struct qed_cdu_iids cdu_iids;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300554 struct qed_src_iids src_iids;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200555 struct qed_qm_iids qm_iids;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300556 struct qed_tm_iids tm_iids;
557 struct qed_tid_seg *p_seg;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200558
559 memset(&qm_iids, 0, sizeof(qm_iids));
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300560 memset(&cdu_iids, 0, sizeof(cdu_iids));
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300561 memset(&src_iids, 0, sizeof(src_iids));
562 memset(&tm_iids, 0, sizeof(tm_iids));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200563
564 p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
565
566 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
567 "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
568 p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
569
570 /* CDUC */
571 p_cli = &p_mngr->clients[ILT_CLI_CDUC];
572 curr_line = p_mngr->pf_start_line;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300573
574 /* CDUC PF */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200575 p_cli->pf_total_lines = 0;
576
577 /* get the counters for the CDUC and QM clients */
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300578 qed_cxt_cdu_iids(p_mngr, &cdu_iids);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200579
580 p_blk = &p_cli->pf_blks[CDUC_BLK];
581
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300582 total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200583
584 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
585 total, CONN_CXT_SIZE(p_hwfn));
586
587 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
588 p_cli->pf_total_lines = curr_line - p_blk->start_line;
589
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300590 p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
591 ILT_CLI_CDUC);
592
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300593 /* CDUC VF */
594 p_blk = &p_cli->vf_blks[CDUC_BLK];
595 total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
596
597 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
598 total, CONN_CXT_SIZE(p_hwfn));
599
600 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
601 p_cli->vf_total_lines = curr_line - p_blk->start_line;
602
603 for (i = 1; i < p_mngr->vf_count; i++)
604 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
605 ILT_CLI_CDUC);
606
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300607 /* CDUT PF */
608 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
609 p_cli->first.val = curr_line;
610
611 /* first the 'working' task memory */
612 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
613 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
614 if (!p_seg || p_seg->count == 0)
615 continue;
616
617 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
618 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
619 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
620 p_mngr->task_type_size[p_seg->type]);
621
622 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
623 ILT_CLI_CDUT);
624 }
625
626 /* next the 'init' task memory (forced load memory) */
627 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
628 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
629 if (!p_seg || p_seg->count == 0)
630 continue;
631
632 p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
633
634 if (!p_seg->has_fl_mem) {
635 /* The segment is active (total size pf 'working'
636 * memory is > 0) but has no FL (forced-load, Init)
637 * memory. Thus:
638 *
 639 * 1. The total-size in the corresponding FL block of
 640 * the ILT client is set to 0 - no ILT lines are
 641 * provisioned and no ILT memory is allocated.
642 *
643 * 2. The start-line of said block is set to the
644 * start line of the matching working memory
645 * block in the ILT client. This is later used to
646 * configure the CDU segment offset registers and
 647 * results in FL commands for TIDs of this
 648 * segment behaving as regular load commands
649 * (loading TIDs from the working memory).
650 */
651 line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
652
653 qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
654 continue;
655 }
656 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
657
658 qed_ilt_cli_blk_fill(p_cli, p_blk,
659 curr_line, total,
660 p_mngr->task_type_size[p_seg->type]);
661
662 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
663 ILT_CLI_CDUT);
664 }
665 p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
666
667 /* CDUT VF */
668 p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
669 if (p_seg && p_seg->count) {
 670 /* Strictly speaking we need to iterate over all VF
671 * task segment types, but a VF has only 1 segment
672 */
673
674 /* 'working' memory */
675 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
676
677 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
678 qed_ilt_cli_blk_fill(p_cli, p_blk,
679 curr_line, total,
680 p_mngr->task_type_size[p_seg->type]);
681
682 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
683 ILT_CLI_CDUT);
684
685 /* 'init' memory */
686 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
687 if (!p_seg->has_fl_mem) {
688 /* see comment above */
689 line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
690 qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
691 } else {
692 task_size = p_mngr->task_type_size[p_seg->type];
693 qed_ilt_cli_blk_fill(p_cli, p_blk,
694 curr_line, total, task_size);
695 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
696 ILT_CLI_CDUT);
697 }
698 p_cli->vf_total_lines = curr_line -
699 p_cli->vf_blks[0].start_line;
700
701 /* Now for the rest of the VFs */
702 for (i = 1; i < p_mngr->vf_count; i++) {
703 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
704 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
705 ILT_CLI_CDUT);
706
707 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
708 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
709 ILT_CLI_CDUT);
710 }
711 }
712
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200713 /* QM */
714 p_cli = &p_mngr->clients[ILT_CLI_QM];
715 p_blk = &p_cli->pf_blks[0];
716
717 qed_cxt_qm_iids(p_hwfn, &qm_iids);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300718 total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300719 qm_iids.vf_cids, qm_iids.tids,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300720 p_hwfn->qm_info.num_pqs,
721 p_hwfn->qm_info.num_vf_pqs);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200722
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300723 DP_VERBOSE(p_hwfn,
724 QED_MSG_ILT,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300725 "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300726 qm_iids.cids,
727 qm_iids.vf_cids,
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300728 qm_iids.tids,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300729 p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200730
731 qed_ilt_cli_blk_fill(p_cli, p_blk,
732 curr_line, total * 0x1000,
733 QM_PQ_ELEMENT_SIZE);
734
735 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
736 p_cli->pf_total_lines = curr_line - p_blk->start_line;
737
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300738 /* SRC */
739 p_cli = &p_mngr->clients[ILT_CLI_SRC];
740 qed_cxt_src_iids(p_mngr, &src_iids);
741
742 /* Both the PF and VFs searcher connections are stored in the per PF
743 * database. Thus sum the PF searcher cids and all the VFs searcher
744 * cids.
745 */
746 total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
747 if (total) {
748 u32 local_max = max_t(u32, total,
749 SRC_MIN_NUM_ELEMS);
750
751 total = roundup_pow_of_two(local_max);
752
753 p_blk = &p_cli->pf_blks[0];
754 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
755 total * sizeof(struct src_ent),
756 sizeof(struct src_ent));
757
758 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
759 ILT_CLI_SRC);
760 p_cli->pf_total_lines = curr_line - p_blk->start_line;
761 }
762
763 /* TM PF */
764 p_cli = &p_mngr->clients[ILT_CLI_TM];
Michal Kalderon44531ba2017-04-03 12:21:10 +0300765 qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300766 total = tm_iids.pf_cids + tm_iids.pf_tids_total;
767 if (total) {
768 p_blk = &p_cli->pf_blks[0];
769 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
770 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
771
772 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
773 ILT_CLI_TM);
774 p_cli->pf_total_lines = curr_line - p_blk->start_line;
775 }
776
777 /* TM VF */
778 total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
779 if (total) {
780 p_blk = &p_cli->vf_blks[0];
781 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
782 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
783
784 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
785 ILT_CLI_TM);
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300786
Mintz, Yuval70566b42017-04-03 12:21:11 +0300787 p_cli->vf_total_lines = curr_line - p_blk->start_line;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300788 for (i = 1; i < p_mngr->vf_count; i++)
789 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
790 ILT_CLI_TM);
791 }
792
793 /* TSDM (SRQ CONTEXT) */
794 total = qed_cxt_get_srq_count(p_hwfn);
795
796 if (total) {
797 p_cli = &p_mngr->clients[ILT_CLI_TSDM];
798 p_blk = &p_cli->pf_blks[SRQ_BLK];
799 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
800 total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
801
802 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
803 ILT_CLI_TSDM);
804 p_cli->pf_total_lines = curr_line - p_blk->start_line;
805 }
806
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200807 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
808 RESC_NUM(p_hwfn, QED_ILT)) {
809 DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
810 curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
811 return -EINVAL;
812 }
813
814 return 0;
815}
816
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300817static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
818{
819 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
820 u32 i;
821
822 if (!p_mngr->t2)
823 return;
824
825 for (i = 0; i < p_mngr->t2_num_pages; i++)
826 if (p_mngr->t2[i].p_virt)
827 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
828 p_mngr->t2[i].size,
829 p_mngr->t2[i].p_virt,
830 p_mngr->t2[i].p_phys);
831
832 kfree(p_mngr->t2);
833 p_mngr->t2 = NULL;
834}
835
836static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
837{
838 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
839 u32 conn_num, total_size, ent_per_page, psz, i;
840 struct qed_ilt_client_cfg *p_src;
841 struct qed_src_iids src_iids;
842 struct qed_dma_mem *p_t2;
843 int rc;
844
845 memset(&src_iids, 0, sizeof(src_iids));
846
 847 /* if the SRC ILT client is inactive - there are no connections
 848 * requiring the searcher, leave.
849 */
850 p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
851 if (!p_src->active)
852 return 0;
853
854 qed_cxt_src_iids(p_mngr, &src_iids);
855 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
856 total_size = conn_num * sizeof(struct src_ent);
857
858 /* use the same page size as the SRC ILT client */
859 psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
860 p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
861
862 /* allocate t2 */
Joe Perches2591c282016-09-04 14:24:03 -0700863 p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300864 GFP_KERNEL);
865 if (!p_mngr->t2) {
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300866 rc = -ENOMEM;
867 goto t2_fail;
868 }
869
870 /* allocate t2 pages */
871 for (i = 0; i < p_mngr->t2_num_pages; i++) {
872 u32 size = min_t(u32, total_size, psz);
873 void **p_virt = &p_mngr->t2[i].p_virt;
874
875 *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
876 size,
877 &p_mngr->t2[i].p_phys, GFP_KERNEL);
878 if (!p_mngr->t2[i].p_virt) {
879 rc = -ENOMEM;
880 goto t2_fail;
881 }
882 memset(*p_virt, 0, size);
883 p_mngr->t2[i].size = size;
884 total_size -= size;
885 }
886
887 /* Set the t2 pointers */
888
889 /* entries per page - must be a power of two */
890 ent_per_page = psz / sizeof(struct src_ent);
891
892 p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
893
894 p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
895 p_mngr->last_free = (u64) p_t2->p_phys +
896 ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
897
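 /* first_free/last_free now hold the physical addresses of the first and
 * last T2 entries; the loop below links every entry to the next, crossing
 * page boundaries via each page's physical address.
 */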
898 for (i = 0; i < p_mngr->t2_num_pages; i++) {
899 u32 ent_num = min_t(u32,
900 ent_per_page,
901 conn_num);
902 struct src_ent *entries = p_mngr->t2[i].p_virt;
903 u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
904 u32 j;
905
906 for (j = 0; j < ent_num - 1; j++) {
907 val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
908 entries[j].next = cpu_to_be64(val);
909 }
910
911 if (i < p_mngr->t2_num_pages - 1)
912 val = (u64) p_mngr->t2[i + 1].p_phys;
913 else
914 val = 0;
915 entries[j].next = cpu_to_be64(val);
916
Dan Carpenter01e517f2016-06-07 15:04:16 +0300917 conn_num -= ent_num;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300918 }
919
920 return 0;
921
922t2_fail:
923 qed_cxt_src_t2_free(p_hwfn);
924 return rc;
925}
926
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200927#define for_each_ilt_valid_client(pos, clients) \
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300928 for (pos = 0; pos < ILT_CLI_MAX; pos++) \
929 if (!clients[pos].active) { \
930 continue; \
931 } else \
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200932
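/* Note: the dangling 'else' in the macro above lets it be followed by a
 * single-statement loop body while inactive clients are skipped, e.g.
 * for_each_ilt_valid_client(i, clients) size += ...;
 */
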
933/* Total number of ILT lines used by this PF */
934static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
935{
936 u32 size = 0;
937 u32 i;
938
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300939 for_each_ilt_valid_client(i, ilt_clients)
940 size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200941
942 return size;
943}
944
945static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
946{
947 struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
948 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
949 u32 ilt_size, i;
950
951 ilt_size = qed_cxt_ilt_shadow_size(p_cli);
952
953 for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
954 struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
955
956 if (p_dma->p_virt)
957 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
958 p_dma->size, p_dma->p_virt,
959 p_dma->p_phys);
960 p_dma->p_virt = NULL;
961 }
962 kfree(p_mngr->ilt_shadow);
963}
964
965static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
966 struct qed_ilt_cli_blk *p_blk,
967 enum ilt_clients ilt_client,
968 u32 start_line_offset)
969{
970 struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300971 u32 lines, line, sz_left, lines_to_skip = 0;
972
973 /* Special handling for RoCE that supports dynamic allocation */
974 if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
975 ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
976 return 0;
977
978 lines_to_skip = p_blk->dynamic_line_cnt;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200979
980 if (!p_blk->total_size)
981 return 0;
982
983 sz_left = p_blk->total_size;
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300984 lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200985 line = p_blk->start_line + start_line_offset -
Yuval Mintzdbb799c2016-06-03 14:35:35 +0300986 p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200987
988 for (; lines; lines--) {
989 dma_addr_t p_phys;
990 void *p_virt;
991 u32 size;
992
Yuval Mintz1a635e42016-08-15 10:42:43 +0300993 size = min_t(u32, sz_left, p_blk->real_size_in_page);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200994 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300995 size, &p_phys, GFP_KERNEL);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200996 if (!p_virt)
997 return -ENOMEM;
998 memset(p_virt, 0, size);
999
1000 ilt_shadow[line].p_phys = p_phys;
1001 ilt_shadow[line].p_virt = p_virt;
1002 ilt_shadow[line].size = size;
1003
1004 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1005 "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
1006 line, (u64)p_phys, p_virt, size);
1007
1008 sz_left -= size;
1009 line++;
1010 }
1011
1012 return 0;
1013}
1014
1015static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
1016{
1017 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1018 struct qed_ilt_client_cfg *clients = p_mngr->clients;
1019 struct qed_ilt_cli_blk *p_blk;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001020 u32 size, i, j, k;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001021 int rc;
1022
1023 size = qed_cxt_ilt_shadow_size(clients);
1024 p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
1025 GFP_KERNEL);
1026 if (!p_mngr->ilt_shadow) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001027 rc = -ENOMEM;
1028 goto ilt_shadow_fail;
1029 }
1030
1031 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1032 "Allocated 0x%x bytes for ilt shadow\n",
1033 (u32)(size * sizeof(struct qed_dma_mem)));
1034
1035 for_each_ilt_valid_client(i, clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001036 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
1037 p_blk = &clients[i].pf_blks[j];
1038 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001039 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001040 goto ilt_shadow_fail;
1041 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001042 for (k = 0; k < p_mngr->vf_count; k++) {
1043 for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
1044 u32 lines = clients[i].vf_total_lines * k;
1045
1046 p_blk = &clients[i].vf_blks[j];
1047 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001048 if (rc)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001049 goto ilt_shadow_fail;
1050 }
1051 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001052 }
1053
1054 return 0;
1055
1056ilt_shadow_fail:
1057 qed_ilt_shadow_free(p_hwfn);
1058 return rc;
1059}
1060
1061static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
1062{
1063 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1064 u32 type;
1065
1066 for (type = 0; type < MAX_CONN_TYPES; type++) {
1067 kfree(p_mngr->acquired[type].cid_map);
1068 p_mngr->acquired[type].max_count = 0;
1069 p_mngr->acquired[type].start_cid = 0;
1070 }
1071}
1072
1073static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
1074{
1075 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1076 u32 start_cid = 0;
1077 u32 type;
1078
1079 for (type = 0; type < MAX_CONN_TYPES; type++) {
1080 u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
1081 u32 size;
1082
1083 if (cid_cnt == 0)
1084 continue;
1085
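 /* One bit per CID; the map size is rounded up to a whole number of
 * unsigned longs.
 */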
1086 size = DIV_ROUND_UP(cid_cnt,
1087 sizeof(unsigned long) * BITS_PER_BYTE) *
1088 sizeof(unsigned long);
1089 p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
1090 if (!p_mngr->acquired[type].cid_map)
1091 goto cid_map_fail;
1092
1093 p_mngr->acquired[type].max_count = cid_cnt;
1094 p_mngr->acquired[type].start_cid = start_cid;
1095
1096 p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
1097
1098 DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1099 "Type %08x start: %08x count %08x\n",
1100 type, p_mngr->acquired[type].start_cid,
1101 p_mngr->acquired[type].max_count);
1102 start_cid += cid_cnt;
1103 }
1104
1105 return 0;
1106
1107cid_map_fail:
1108 qed_cid_map_free(p_hwfn);
1109 return -ENOMEM;
1110}
1111
1112int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
1113{
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001114 struct qed_ilt_client_cfg *clients;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001115 struct qed_cxt_mngr *p_mngr;
1116 u32 i;
1117
Yuval Mintz60fffb32016-02-21 11:40:07 +02001118 p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
Joe Perches2591c282016-09-04 14:24:03 -07001119 if (!p_mngr)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001120 return -ENOMEM;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001121
1122 /* Initialize ILT client registers */
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001123 clients = p_mngr->clients;
1124 clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
1125 clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
1126 clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001127
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001128 clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
1129 clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
1130 clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001131
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001132 clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
1133 clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
1134 clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
1135
1136 clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
1137 clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
1138 clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1139
1140 clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1141 clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
1142 clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1143
1144 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1145 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1146 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001147 /* default ILT page size for all clients is 64K */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001148 for (i = 0; i < ILT_CLI_MAX; i++)
1149 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1150
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001151 /* Initialize task sizes */
1152 p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
1153 p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
1154
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001155 if (p_hwfn->cdev->p_iov_info)
1156 p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001157 /* Initialize the dynamic ILT allocation mutex */
1158 mutex_init(&p_mngr->mutex);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001159
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001160 /* Set the cxt manager pointer prior to further allocations */
1161 p_hwfn->p_cxt_mngr = p_mngr;
1162
1163 return 0;
1164}
1165
1166int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
1167{
1168 int rc;
1169
1170 /* Allocate the ILT shadow table */
1171 rc = qed_ilt_shadow_alloc(p_hwfn);
Joe Perches2591c282016-09-04 14:24:03 -07001172 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001173 goto tables_alloc_fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001174
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001175 /* Allocate the T2 table */
1176 rc = qed_cxt_src_t2_alloc(p_hwfn);
Joe Perches2591c282016-09-04 14:24:03 -07001177 if (rc)
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001178 goto tables_alloc_fail;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001179
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001180 /* Allocate and initialize the acquired cids bitmaps */
1181 rc = qed_cid_map_alloc(p_hwfn);
Joe Perches2591c282016-09-04 14:24:03 -07001182 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001183 goto tables_alloc_fail;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001184
1185 return 0;
1186
1187tables_alloc_fail:
1188 qed_cxt_mngr_free(p_hwfn);
1189 return rc;
1190}
1191
1192void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
1193{
1194 if (!p_hwfn->p_cxt_mngr)
1195 return;
1196
1197 qed_cid_map_free(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001198 qed_cxt_src_t2_free(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001199 qed_ilt_shadow_free(p_hwfn);
1200 kfree(p_hwfn->p_cxt_mngr);
1201
1202 p_hwfn->p_cxt_mngr = NULL;
1203}
1204
1205void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
1206{
1207 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1208 int type;
1209
1210 /* Reset acquired cids */
1211 for (type = 0; type < MAX_CONN_TYPES; type++) {
1212 u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
1213
1214 if (cid_cnt == 0)
1215 continue;
1216
1217 memset(p_mngr->acquired[type].cid_map, 0,
1218 DIV_ROUND_UP(cid_cnt,
1219 sizeof(unsigned long) * BITS_PER_BYTE) *
1220 sizeof(unsigned long));
1221 }
1222}
1223
1224/* CDU Common */
1225#define CDUC_CXT_SIZE_SHIFT \
1226 CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
1227
1228#define CDUC_CXT_SIZE_MASK \
1229 (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
1230
1231#define CDUC_BLOCK_WASTE_SHIFT \
1232 CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
1233
1234#define CDUC_BLOCK_WASTE_MASK \
1235 (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
1236
1237#define CDUC_NCIB_SHIFT \
1238 CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
1239
1240#define CDUC_NCIB_MASK \
1241 (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
1242
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001243#define CDUT_TYPE0_CXT_SIZE_SHIFT \
1244 CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1245
1246#define CDUT_TYPE0_CXT_SIZE_MASK \
1247 (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
1248 CDUT_TYPE0_CXT_SIZE_SHIFT)
1249
1250#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
1251 CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1252
1253#define CDUT_TYPE0_BLOCK_WASTE_MASK \
1254 (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
1255 CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1256
1257#define CDUT_TYPE0_NCIB_SHIFT \
1258 CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1259
1260#define CDUT_TYPE0_NCIB_MASK \
1261 (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
1262 CDUT_TYPE0_NCIB_SHIFT)
1263
1264#define CDUT_TYPE1_CXT_SIZE_SHIFT \
1265 CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1266
1267#define CDUT_TYPE1_CXT_SIZE_MASK \
1268 (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
1269 CDUT_TYPE1_CXT_SIZE_SHIFT)
1270
1271#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
1272 CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1273
1274#define CDUT_TYPE1_BLOCK_WASTE_MASK \
1275 (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
1276 CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1277
1278#define CDUT_TYPE1_NCIB_SHIFT \
1279 CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1280
1281#define CDUT_TYPE1_NCIB_MASK \
1282 (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
1283 CDUT_TYPE1_NCIB_SHIFT)
1284
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001285static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
1286{
1287 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1288
1289 /* CDUC - connection configuration */
1290 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1291 cxt_size = CONN_CXT_SIZE(p_hwfn);
1292 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1293 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1294
1295 SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1296 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1297 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1298 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001299
1300 /* CDUT - type-0 tasks configuration */
1301 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1302 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1303 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1304 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1305
 1306 /* cxt size and block-waste are multiples of 8 */
1307 cdu_params = 0;
1308 SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1309 SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1310 SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1311 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1312
1313 /* CDUT - type-1 tasks configuration */
1314 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1315 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1316 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1317
 1318 /* cxt size and block-waste are multiples of 8 */
1319 cdu_params = 0;
1320 SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1321 SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1322 SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1323 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1324}
1325
1326/* CDU PF */
1327#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1328#define CDU_SEG_REG_TYPE_MASK 0x1
1329#define CDU_SEG_REG_OFFSET_SHIFT 0
1330#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1331
1332static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1333{
1334 struct qed_ilt_client_cfg *p_cli;
1335 struct qed_tid_seg *p_seg;
1336 u32 cdu_seg_params, offset;
1337 int i;
1338
1339 static const u32 rt_type_offset_arr[] = {
1340 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1341 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1342 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1343 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1344 };
1345
1346 static const u32 rt_type_offset_fl_arr[] = {
1347 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1348 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1349 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1350 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1351 };
1352
1353 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1354
 1355 /* There are initializations only for CDUT during the PF phase */
1356 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1357 /* Segment 0 */
1358 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1359 if (!p_seg)
1360 continue;
1361
1362 /* Note: start_line is already adjusted for the CDU
1363 * segment register granularity, so we just need to
1364 * divide. Adjustment is implicit as we assume ILT
1365 * Page size is larger than 32K!
1366 */
1367 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1368 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1369 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1370
1371 cdu_seg_params = 0;
1372 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1373 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1374 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1375
1376 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1377 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1378 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1379
1380 cdu_seg_params = 0;
1381 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1382 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1383 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1384 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001385}
1386
1387void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
1388{
1389 struct qed_qm_pf_rt_init_params params;
1390 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1391 struct qed_qm_iids iids;
1392
1393 memset(&iids, 0, sizeof(iids));
1394 qed_cxt_qm_iids(p_hwfn, &iids);
1395
1396 memset(&params, 0, sizeof(params));
1397 params.port_id = p_hwfn->port_id;
1398 params.pf_id = p_hwfn->rel_pf_id;
1399 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1400 params.is_first_pf = p_hwfn->first_on_engine;
1401 params.num_pf_cids = iids.cids;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001402 params.num_vf_cids = iids.vf_cids;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001403 params.start_pq = qm_info->start_pq;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001404 params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1405 params.num_vf_pqs = qm_info->num_vf_pqs;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001406 params.start_vport = qm_info->start_vport;
1407 params.num_vports = qm_info->num_vports;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001408 params.pf_wfq = qm_info->pf_wfq;
1409 params.pf_rl = qm_info->pf_rl;
1410 params.pq_params = qm_info->qm_pq_params;
1411 params.vport_params = qm_info->qm_vport_params;
1412
1413 qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
1414}
1415
1416/* CM PF */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001417void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001418{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001419 /* XCM pure-LB queue */
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03001420 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1421 qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001422}
1423
1424/* DQ PF */
1425static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1426{
1427 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001428 u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001429
1430 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1431 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1432
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001433 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1434 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1435
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001436 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1437 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1438
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001439 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1440 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1441
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001442 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1443 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1444
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001445 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1446 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1447
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001448 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1449 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1450
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001451 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1452 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1453
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001454 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1455 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1456
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001457 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1458 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1459
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001460 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1461 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001462
1463 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1464 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1465
1466 /* Connection types 6 & 7 are not in use, yet they must be configured
1467 * as the highest possible connection. Not configuring them means the
1468 * defaults will be used, and with a large number of cids a bug may
 1469 * occur if the defaults are smaller than dq_pf_max_cid /
1470 * dq_vf_max_cid.
1471 */
1472 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1473 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1474
1475 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1476 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001477}
1478
1479static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1480{
1481 struct qed_ilt_client_cfg *ilt_clients;
1482 int i;
1483
1484 ilt_clients = p_hwfn->p_cxt_mngr->clients;
1485 for_each_ilt_valid_client(i, ilt_clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001486 STORE_RT_REG(p_hwfn,
1487 ilt_clients[i].first.reg,
1488 ilt_clients[i].first.val);
1489 STORE_RT_REG(p_hwfn,
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001490 ilt_clients[i].last.reg, ilt_clients[i].last.val);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001491 STORE_RT_REG(p_hwfn,
1492 ilt_clients[i].p_size.reg,
1493 ilt_clients[i].p_size.val);
1494 }
1495}
1496
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001497static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1498{
1499 struct qed_ilt_client_cfg *p_cli;
1500 u32 blk_factor;
1501
 1502	/* For simplicity we set the 'block' to be an ILT page */
1503 if (p_hwfn->cdev->p_iov_info) {
1504 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1505
1506 STORE_RT_REG(p_hwfn,
1507 PSWRQ2_REG_VF_BASE_RT_OFFSET,
1508 p_iov->first_vf_in_pf);
1509 STORE_RT_REG(p_hwfn,
1510 PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1511 p_iov->first_vf_in_pf + p_iov->total_vfs);
1512 }
1513
1514 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1515 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1516 if (p_cli->active) {
1517 STORE_RT_REG(p_hwfn,
1518 PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1519 blk_factor);
1520 STORE_RT_REG(p_hwfn,
1521 PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1522 p_cli->pf_total_lines);
1523 STORE_RT_REG(p_hwfn,
1524 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1525 p_cli->vf_total_lines);
1526 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001527
1528 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1529 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1530 if (p_cli->active) {
1531 STORE_RT_REG(p_hwfn,
1532 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1533 blk_factor);
1534 STORE_RT_REG(p_hwfn,
1535 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1536 p_cli->pf_total_lines);
1537 STORE_RT_REG(p_hwfn,
1538 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1539 p_cli->vf_total_lines);
1540 }
1541
1542 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1543 blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1544 if (p_cli->active) {
1545 STORE_RT_REG(p_hwfn,
1546 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1547 STORE_RT_REG(p_hwfn,
1548 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1549 p_cli->pf_total_lines);
1550 STORE_RT_REG(p_hwfn,
1551 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1552 p_cli->vf_total_lines);
1553 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001554}
1555
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001556/* ILT (PSWRQ2) PF */
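/* Each ILT entry occupies ILT_ENTRY_IN_REGS runtime registers and holds the
 * shadow page's physical address shifted right by 12, with the
 * ILT_ENTRY_VALID bit set. For example, a shadow page at physical address
 * 0x123456000 (illustrative) is programmed as the field value 0x123456 plus
 * the valid bit.
 */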
1557static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1558{
1559 struct qed_ilt_client_cfg *clients;
1560 struct qed_cxt_mngr *p_mngr;
1561 struct qed_dma_mem *p_shdw;
1562 u32 line, rt_offst, i;
1563
1564 qed_ilt_bounds_init(p_hwfn);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001565 qed_ilt_vf_bounds_init(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001566
1567 p_mngr = p_hwfn->p_cxt_mngr;
1568 p_shdw = p_mngr->ilt_shadow;
1569 clients = p_hwfn->p_cxt_mngr->clients;
1570
1571 for_each_ilt_valid_client(i, clients) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001572		/* Client's 1st val and RT array are absolute; ILT shadow
 1573		 * lines are relative.
 1574		 */
1575 line = clients[i].first.val - p_mngr->pf_start_line;
1576 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1577 clients[i].first.val * ILT_ENTRY_IN_REGS;
1578
1579 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1580 line++, rt_offst += ILT_ENTRY_IN_REGS) {
1581 u64 ilt_hw_entry = 0;
1582
 1583			/* p_virt could be NULL in case of dynamic
 1584			 * allocation
 1585			 */
1586 if (p_shdw[line].p_virt) {
1587 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1588 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1589 (p_shdw[line].p_phys >> 12));
1590
1591 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1592 "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1593 rt_offst, line, i,
1594 (u64)(p_shdw[line].p_phys >> 12));
1595 }
1596
1597 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1598 }
1599 }
1600}
1601
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001602/* SRC (Searcher) PF */
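/* The searcher is sized for the total PF + per-VF connection count. A worked
 * example (illustrative numbers): with conn_num = 1000, conn_max becomes
 * max(1000, SRC_MIN_NUM_ELEMS) = 1000, which rounds up to 1024, so
 * SRC_REG_NUMBER_HASH_BITS is programmed with ilog2(1024) = 10.
 */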
1603static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1604{
1605 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1606 u32 rounded_conn_num, conn_num, conn_max;
1607 struct qed_src_iids src_iids;
1608
1609 memset(&src_iids, 0, sizeof(src_iids));
1610 qed_cxt_src_iids(p_mngr, &src_iids);
1611 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1612 if (!conn_num)
1613 return;
1614
1615 conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1616 rounded_conn_num = roundup_pow_of_two(conn_max);
1617
1618 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1619 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1620 ilog2(rounded_conn_num));
1621
1622 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1623 p_hwfn->p_cxt_mngr->first_free);
1624 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1625 p_hwfn->p_cxt_mngr->last_free);
1626}
1627
1628/* Timers PF */
1629#define TM_CFG_NUM_IDS_SHIFT 0
1630#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
1631#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
1632#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
1633#define TM_CFG_PARENT_PF_SHIFT 25
1634#define TM_CFG_PARENT_PF_MASK 0x7ULL
1635
1636#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
1637#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
1638
1639#define TM_CFG_TID_OFFSET_SHIFT 30
1640#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
1641#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
1642#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
1643
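/* The TM_CFG_* fields above are packed into a single 64-bit cfg_word with
 * SET_FIELD(). For example (illustrative values), SET_FIELD(cfg_word,
 * TM_CFG_NUM_IDS, 64) places 64 in bits 0..15 and SET_FIELD(cfg_word,
 * TM_CFG_PARENT_PF, 2) places 2 in bits 25..27.
 */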
1644static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1645{
1646 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1647 u32 active_seg_mask = 0, tm_offset, rt_reg;
1648 struct qed_tm_iids tm_iids;
1649 u64 cfg_word;
1650 u8 i;
1651
1652 memset(&tm_iids, 0, sizeof(tm_iids));
Michal Kalderon44531ba2017-04-03 12:21:10 +03001653 qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001654
1655 /* @@@TBD No pre-scan for now */
1656
1657 /* Note: We assume consecutive VFs for a PF */
1658 for (i = 0; i < p_mngr->vf_count; i++) {
1659 cfg_word = 0;
1660 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1661 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1662 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1663 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1664 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1665 (sizeof(cfg_word) / sizeof(u32)) *
1666 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1667 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1668 }
1669
1670 cfg_word = 0;
1671 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1672 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1673 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
1674 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1675
1676 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1677 (sizeof(cfg_word) / sizeof(u32)) *
1678 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1679 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1680
 1681	/* enable scan */
1682 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1683 tm_iids.pf_cids ? 0x1 : 0x0);
1684
1685 /* @@@TBD how to enable the scan for the VFs */
1686
1687 tm_offset = tm_iids.per_vf_cids;
1688
1689 /* Note: We assume consecutive VFs for a PF */
1690 for (i = 0; i < p_mngr->vf_count; i++) {
1691 cfg_word = 0;
1692 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1693 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1694 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1695 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1696 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1697
1698 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1699 (sizeof(cfg_word) / sizeof(u32)) *
1700 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1701
1702 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1703 }
1704
1705 tm_offset = tm_iids.pf_cids;
1706 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1707 cfg_word = 0;
1708 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1709 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1710 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1711 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1712 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1713
1714 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1715 (sizeof(cfg_word) / sizeof(u32)) *
1716 (NUM_OF_VFS(p_hwfn->cdev) +
1717 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1718
1719 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001720 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001721
1722 tm_offset += tm_iids.pf_tids[i];
1723 }
1724
1725 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
1726 active_seg_mask = 0;
1727
1728 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1729
1730 /* @@@TBD how to enable the scan for the VFs */
1731}
1732
Arun Easi1e128c82017-02-15 06:28:22 -08001733static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1734{
1735 if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1736 p_hwfn->pf_params.fcoe_pf_params.is_target)
1737 STORE_RT_REG(p_hwfn,
1738 PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1739}
1740
1741static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1742{
1743 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1744 struct qed_conn_type_cfg *p_fcoe;
1745 struct qed_tid_seg *p_tid;
1746
1747 p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1748
1749 /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1750 if (!p_fcoe->cid_count)
1751 return;
1752
1753 p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1754 if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1755 STORE_RT_REG_AGG(p_hwfn,
1756 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1757 p_tid->count);
1758 } else {
1759 STORE_RT_REG_AGG(p_hwfn,
1760 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1761 p_tid->count);
1762 }
1763}
1764
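/* Entry points: qed_cxt_hw_init_common() covers the context values programmed
 * in the common init phase (CDU, parser), while qed_cxt_hw_init_pf() fills
 * the per-PF runtime values (QM, CM, DQ, CDU, ILT, searcher, timers, parser).
 */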
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001765void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1766{
1767 qed_cdu_init_common(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001768 qed_prs_init_common(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001769}
1770
1771void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
1772{
1773 qed_qm_init_pf(p_hwfn);
1774 qed_cm_init_pf(p_hwfn);
1775 qed_dq_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001776 qed_cdu_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001777 qed_ilt_init_pf(p_hwfn);
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001778 qed_src_init_pf(p_hwfn);
1779 qed_tm_init_pf(p_hwfn);
Arun Easi1e128c82017-02-15 06:28:22 -08001780 qed_prs_init_pf(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001781}
1782
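/* CID acquisition: find the first free bit in the protocol's cid_map, mark it
 * used and return the absolute CID (start_cid + relative bit). The CID is
 * expected to be handed back later via qed_cxt_release_cid().
 */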
1783int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001784 enum protocol_type type, u32 *p_cid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001785{
1786 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1787 u32 rel_cid;
1788
1789 if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
1790 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1791 return -EINVAL;
1792 }
1793
1794 rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
1795 p_mngr->acquired[type].max_count);
1796
1797 if (rel_cid >= p_mngr->acquired[type].max_count) {
Yuval Mintz1a635e42016-08-15 10:42:43 +03001798 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001799 return -EINVAL;
1800 }
1801
1802 __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
1803
1804 *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
1805
1806 return 0;
1807}
1808
1809static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001810 u32 cid, enum protocol_type *p_type)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001811{
1812 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1813 struct qed_cid_acquired_map *p_map;
1814 enum protocol_type p;
1815 u32 rel_cid;
1816
1817 /* Iterate over protocols and find matching cid range */
1818 for (p = 0; p < MAX_CONN_TYPES; p++) {
1819 p_map = &p_mngr->acquired[p];
1820
1821 if (!p_map->cid_map)
1822 continue;
1823 if (cid >= p_map->start_cid &&
1824 cid < p_map->start_cid + p_map->max_count)
1825 break;
1826 }
1827 *p_type = p;
1828
1829 if (p == MAX_CONN_TYPES) {
1830 DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
1831 return false;
1832 }
1833
1834 rel_cid = cid - p_map->start_cid;
1835 if (!test_bit(rel_cid, p_map->cid_map)) {
1836 DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
1837 return false;
1838 }
1839 return true;
1840}
1841
Yuval Mintz1a635e42016-08-15 10:42:43 +03001842void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001843{
1844 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1845 enum protocol_type type;
1846 bool b_acquired;
1847 u32 rel_cid;
1848
1849 /* Test acquired and find matching per-protocol map */
1850 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
1851
1852 if (!b_acquired)
1853 return;
1854
1855 rel_cid = cid - p_mngr->acquired[type].start_cid;
1856 __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
1857}
1858
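/* Resolve a CID to its context pointer inside the CDUC ILT shadow. As an
 * illustrative example, with a 4KB ILT page and a 512-byte connection
 * context, cxts_per_p = 8, so iid 21 lives in shadow line 21 / 8 = 2 at byte
 * offset (21 % 8) * 512 within that page (sizes are assumptions, not the
 * actual CONN_CXT_SIZE).
 */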
Yuval Mintz1a635e42016-08-15 10:42:43 +03001859int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001860{
1861 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1862 u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1863 enum protocol_type type;
1864 bool b_acquired;
1865
1866 /* Test acquired and find matching per-protocol map */
1867 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
1868
1869 if (!b_acquired)
1870 return -EINVAL;
1871
 1872	/* set the protocol type */
1873 p_info->type = type;
1874
1875 /* compute context virtual pointer */
1876 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1877
1878 conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1879 cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1880 line = p_info->iid / cxts_per_p;
1881
1882 /* Make sure context is allocated (dynamic allocation) */
1883 if (!p_mngr->ilt_shadow[line].p_virt)
1884 return -EINVAL;
1885
1886 p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
1887 p_info->iid % cxts_per_p * conn_cxt_size;
1888
1889 DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
1890 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1891 p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
1892
1893 return 0;
1894}
1895
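/* Translate the user-supplied RDMA parameters into CID/TID requirements: each
 * MR consumes one task id and each RoCE QP consumes two connections, with the
 * requests clamped to RDMA_MAX_TIDS, ROCE_MAX_QPS and 32K SRQs respectively.
 */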
Yuval Mintz8c93bea2016-10-13 22:57:03 +03001896static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1897 struct qed_rdma_pf_params *p_params)
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001898{
1899 u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
1900 enum protocol_type proto;
1901
1902 num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
1903 num_tasks = num_mrs; /* each mr uses a single task id */
1904 num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
1905
1906 switch (p_hwfn->hw_info.personality) {
1907 case QED_PCI_ETH_ROCE:
1908 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
1909 num_cons = num_qps * 2; /* each QP requires two connections */
1910 proto = PROTOCOLID_ROCE;
1911 break;
1912 default:
1913 return;
1914 }
1915
1916 if (num_cons && num_tasks) {
1917 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
1918
 1919		/* Deliberately passing RoCE for the task id. This is because
 1920		 * iWARP / RoCE share the task id.
 1921		 */
1922 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
1923 QED_CXT_ROCE_TID_SEG, 1,
1924 num_tasks, false);
1925 qed_cxt_set_srq_count(p_hwfn, num_srqs);
1926 } else {
1927 DP_INFO(p_hwfn->cdev,
1928 "RDMA personality used without setting params!\n");
1929 }
1930}
1931
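/* Derive the per-protocol CID/TID counts from the PF personality. Note the
 * deliberate fall-through from QED_PCI_ETH_ROCE into QED_PCI_ETH below, since
 * RoCE coexists with the Ethernet personality.
 */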
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001932int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
1933{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001934 /* Set the number of required CORE connections */
1935 u32 core_cids = 1; /* SPQ */
1936
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001937 if (p_hwfn->using_ll2)
1938 core_cids += 4;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001939 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001940
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001941 switch (p_hwfn->hw_info.personality) {
1942 case QED_PCI_ETH_ROCE:
1943 {
1944 qed_rdma_set_pf_params(p_hwfn,
1945 &p_hwfn->
1946 pf_params.rdma_pf_params);
 1947		/* no need for break since RoCE coexists with Ethernet */
1948 }
1949 case QED_PCI_ETH:
1950 {
1951 struct qed_eth_pf_params *p_params =
1952 &p_hwfn->pf_params.eth_pf_params;
1953
1954 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
1955 p_params->num_cons, 1);
1956 break;
1957 }
Arun Easi1e128c82017-02-15 06:28:22 -08001958 case QED_PCI_FCOE:
1959 {
1960 struct qed_fcoe_pf_params *p_params;
1961
1962 p_params = &p_hwfn->pf_params.fcoe_pf_params;
1963
1964 if (p_params->num_cons && p_params->num_tasks) {
1965 qed_cxt_set_proto_cid_count(p_hwfn,
1966 PROTOCOLID_FCOE,
1967 p_params->num_cons,
1968 0);
1969
1970 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
1971 QED_CXT_FCOE_TID_SEG, 0,
1972 p_params->num_tasks, true);
1973 } else {
1974 DP_INFO(p_hwfn->cdev,
1975 "Fcoe personality used without setting params!\n");
1976 }
1977 break;
1978 }
Yuval Mintzdbb799c2016-06-03 14:35:35 +03001979 case QED_PCI_ISCSI:
1980 {
1981 struct qed_iscsi_pf_params *p_params;
1982
1983 p_params = &p_hwfn->pf_params.iscsi_pf_params;
1984
1985 if (p_params->num_cons && p_params->num_tasks) {
1986 qed_cxt_set_proto_cid_count(p_hwfn,
1987 PROTOCOLID_ISCSI,
1988 p_params->num_cons,
1989 0);
1990
1991 qed_cxt_set_proto_tid_count(p_hwfn,
1992 PROTOCOLID_ISCSI,
1993 QED_CXT_ISCSI_TID_SEG,
1994 0,
1995 p_params->num_tasks,
1996 true);
1997 } else {
1998 DP_INFO(p_hwfn->cdev,
1999 "Iscsi personality used without setting params!\n");
2000 }
2001 break;
2002 }
2003 default:
2004 return -EINVAL;
2005 }
2006
2007 return 0;
2008}
2009
2010int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2011 struct qed_tid_mem *p_info)
2012{
2013 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2014 u32 proto, seg, total_lines, i, shadow_line;
2015 struct qed_ilt_client_cfg *p_cli;
2016 struct qed_ilt_cli_blk *p_fl_seg;
2017 struct qed_tid_seg *p_seg_info;
2018
2019 /* Verify the personality */
2020 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002021 case QED_PCI_FCOE:
2022 proto = PROTOCOLID_FCOE;
2023 seg = QED_CXT_FCOE_TID_SEG;
2024 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002025 case QED_PCI_ISCSI:
2026 proto = PROTOCOLID_ISCSI;
2027 seg = QED_CXT_ISCSI_TID_SEG;
2028 break;
2029 default:
2030 return -EINVAL;
2031 }
2032
2033 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2034 if (!p_cli->active)
2035 return -EINVAL;
2036
2037 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2038 if (!p_seg_info->has_fl_mem)
2039 return -EINVAL;
2040
2041 p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2042 total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2043 p_fl_seg->real_size_in_page);
2044
2045 for (i = 0; i < total_lines; i++) {
2046 shadow_line = i + p_fl_seg->start_line -
2047 p_hwfn->p_cxt_mngr->pf_start_line;
2048 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
2049 }
2050 p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2051 p_fl_seg->real_size_in_page;
2052 p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2053 p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2054 p_info->tid_size;
2055
2056 return 0;
2057}
2058
 2059/* This function is very RoCE oriented; if another protocol wants this
 2060 * feature in the future, the function will need to be made more generic.
 2061 */
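/* Rough flow: map the element type to its ILT client/block, compute the ILT
 * line covering the requested iid and, under the context-manager mutex,
 * allocate a DMA-coherent page for that line if one is not already present.
 * The matching PSWRQ2 ILT entry is then written via DMAE (the ILT memory is a
 * wide-bus register), and for connection contexts the parser's max-QP
 * register is updated and RoCE search is enabled.
 */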
2062int
2063qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2064 enum qed_cxt_elem_type elem_type, u32 iid)
2065{
2066 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2067 struct qed_ilt_client_cfg *p_cli;
2068 struct qed_ilt_cli_blk *p_blk;
2069 struct qed_ptt *p_ptt;
2070 dma_addr_t p_phys;
2071 u64 ilt_hw_entry;
2072 void *p_virt;
2073 int rc = 0;
2074
2075 switch (elem_type) {
2076 case QED_ELEM_CXT:
2077 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2078 elem_size = CONN_CXT_SIZE(p_hwfn);
2079 p_blk = &p_cli->pf_blks[CDUC_BLK];
2080 break;
2081 case QED_ELEM_SRQ:
2082 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2083 elem_size = SRQ_CXT_SIZE;
2084 p_blk = &p_cli->pf_blks[SRQ_BLK];
2085 break;
2086 case QED_ELEM_TASK:
2087 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2088 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2089 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2090 break;
2091 default:
 2092		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2093 return -EINVAL;
2094 }
2095
2096 /* Calculate line in ilt */
2097 hw_p_size = p_cli->p_size.val;
2098 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2099 line = p_blk->start_line + (iid / elems_per_p);
2100 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2101
2102 /* If line is already allocated, do nothing, otherwise allocate it and
2103 * write it to the PSWRQ2 registers.
2104 * This section can be run in parallel from different contexts and thus
2105 * a mutex protection is needed.
2106 */
2107
2108 mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2109
2110 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2111 goto out0;
2112
2113 p_ptt = qed_ptt_acquire(p_hwfn);
2114 if (!p_ptt) {
2115 DP_NOTICE(p_hwfn,
2116 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2117 rc = -EBUSY;
2118 goto out0;
2119 }
2120
2121 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2122 p_blk->real_size_in_page,
2123 &p_phys, GFP_KERNEL);
2124 if (!p_virt) {
2125 rc = -ENOMEM;
2126 goto out1;
2127 }
2128 memset(p_virt, 0, p_blk->real_size_in_page);
2129
2130 /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2131 * to compensate for a HW bug, but it is configured even if DIF is not
2132 * enabled. This is harmless and allows us to avoid a dedicated API. We
2133 * configure the field for all of the contexts on the newly allocated
2134 * page.
2135 */
2136 if (elem_type == QED_ELEM_TASK) {
2137 u32 elem_i;
2138 u8 *elem_start = (u8 *)p_virt;
2139 union type1_task_context *elem;
2140
2141 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2142 elem = (union type1_task_context *)elem_start;
2143 SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2144 TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
2145 elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2146 }
2147 }
2148
2149 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2150 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2151 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2152 p_blk->real_size_in_page;
2153
2154 /* compute absolute offset */
2155 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2156 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2157
2158 ilt_hw_entry = 0;
2159 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2160 SET_FIELD(ilt_hw_entry,
2161 ILT_ENTRY_PHY_ADDR,
2162 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2163
2164 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2165 qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2166 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2167
2168 if (elem_type == QED_ELEM_CXT) {
2169 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2170 elems_per_p;
2171
2172 /* Update the relevant register in the parser */
2173 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2174 last_cid_allocated - 1);
2175
2176 if (!p_hwfn->b_rdma_enabled_in_prs) {
2177 /* Enable RoCE search */
2178 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2179 p_hwfn->b_rdma_enabled_in_prs = true;
2180 }
2181 }
2182
2183out1:
2184 qed_ptt_release(p_hwfn, p_ptt);
2185out0:
2186 mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2187
2188 return rc;
2189}
2190
 2191/* This function is very RoCE oriented; if another protocol wants this
 2192 * feature in the future, the function will need to be made more generic.
 2193 */
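/* The inverse of the dynamic allocation above: walk the shadow lines spanned
 * by [start_iid, start_iid + count), free each DMA-coherent page and clear
 * the corresponding ILT entry in hardware by DMAE-writing a zeroed entry.
 */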
2194static int
2195qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2196 enum qed_cxt_elem_type elem_type,
2197 u32 start_iid, u32 count)
2198{
2199 u32 start_line, end_line, shadow_start_line, shadow_end_line;
2200 u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2201 struct qed_ilt_client_cfg *p_cli;
2202 struct qed_ilt_cli_blk *p_blk;
2203 u32 end_iid = start_iid + count;
2204 struct qed_ptt *p_ptt;
2205 u64 ilt_hw_entry = 0;
2206 u32 i;
2207
2208 switch (elem_type) {
2209 case QED_ELEM_CXT:
2210 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2211 elem_size = CONN_CXT_SIZE(p_hwfn);
2212 p_blk = &p_cli->pf_blks[CDUC_BLK];
2213 break;
2214 case QED_ELEM_SRQ:
2215 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2216 elem_size = SRQ_CXT_SIZE;
2217 p_blk = &p_cli->pf_blks[SRQ_BLK];
2218 break;
2219 case QED_ELEM_TASK:
2220 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2221 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2222 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2223 break;
2224 default:
 2225		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2226 return -EINVAL;
2227 }
2228
2229 /* Calculate line in ilt */
2230 hw_p_size = p_cli->p_size.val;
2231 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2232 start_line = p_blk->start_line + (start_iid / elems_per_p);
2233 end_line = p_blk->start_line + (end_iid / elems_per_p);
2234 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2235 end_line--;
2236
2237 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2238 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2239
2240 p_ptt = qed_ptt_acquire(p_hwfn);
2241 if (!p_ptt) {
2242 DP_NOTICE(p_hwfn,
2243 "QED_TIME_OUT on ptt acquire - dynamic allocation");
2244 return -EBUSY;
2245 }
2246
2247 for (i = shadow_start_line; i < shadow_end_line; i++) {
2248 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2249 continue;
2250
2251 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2252 p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2253 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2254 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2255
2256 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2257 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2258 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2259
2260 /* compute absolute offset */
2261 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2262 ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2263 ILT_ENTRY_IN_REGS);
2264
2265 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2266 * wide-bus.
2267 */
2268 qed_dmae_host2grc(p_hwfn, p_ptt,
2269 (u64) (uintptr_t) &ilt_hw_entry,
2270 reg_offset,
2271 sizeof(ilt_hw_entry) / sizeof(u32),
2272 0);
2273 }
2274
2275 qed_ptt_release(p_hwfn, p_ptt);
2276
2277 return 0;
2278}
2279
2280int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2281{
2282 int rc;
2283 u32 cid;
2284
2285 /* Free Connection CXT */
2286 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2287 qed_cxt_get_proto_cid_start(p_hwfn,
2288 proto),
2289 qed_cxt_get_proto_cid_count(p_hwfn,
2290 proto, &cid));
2291
2292 if (rc)
2293 return rc;
2294
2295 /* Free Task CXT */
2296 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2297 qed_cxt_get_proto_tid_count(p_hwfn, proto));
2298 if (rc)
2299 return rc;
2300
2301 /* Free TSDM CXT */
2302 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2303 qed_cxt_get_srq_count(p_hwfn));
2304
2305 return rc;
2306}
2307
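/* Resolve a task id to its context address inside the CDUT ILT shadow. As an
 * illustrative example, with a 4KB ILT page and a 128-byte task context,
 * num_tids_per_block = 32, so tid 100 falls in the fourth block of the
 * segment (100 / 32 = 3) at byte offset (100 % 32) * 128 within it (sizes
 * are assumptions, not the actual TYPE1_TASK_CXT_SIZE).
 */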
2308int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2309 u32 tid, u8 ctx_type, void **pp_task_ctx)
2310{
2311 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2312 struct qed_ilt_client_cfg *p_cli;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002313 struct qed_tid_seg *p_seg_info;
Arun Easi1e128c82017-02-15 06:28:22 -08002314 struct qed_ilt_cli_blk *p_seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002315 u32 num_tids_per_block;
Arun Easi1e128c82017-02-15 06:28:22 -08002316 u32 tid_size, ilt_idx;
2317 u32 total_lines;
2318 u32 proto, seg;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002319
2320 /* Verify the personality */
2321 switch (p_hwfn->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002322 case QED_PCI_FCOE:
2323 proto = PROTOCOLID_FCOE;
2324 seg = QED_CXT_FCOE_TID_SEG;
2325 break;
Yuval Mintzdbb799c2016-06-03 14:35:35 +03002326 case QED_PCI_ISCSI:
2327 proto = PROTOCOLID_ISCSI;
2328 seg = QED_CXT_ISCSI_TID_SEG;
2329 break;
2330 default:
2331 return -EINVAL;
2332 }
2333
2334 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2335 if (!p_cli->active)
2336 return -EINVAL;
2337
2338 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2339
2340 if (ctx_type == QED_CTX_WORKING_MEM) {
2341 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2342 } else if (ctx_type == QED_CTX_FL_MEM) {
2343 if (!p_seg_info->has_fl_mem)
2344 return -EINVAL;
2345 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2346 } else {
2347 return -EINVAL;
2348 }
2349 total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2350 tid_size = p_mngr->task_type_size[p_seg_info->type];
2351 num_tids_per_block = p_seg->real_size_in_page / tid_size;
2352
2353 if (total_lines < tid / num_tids_per_block)
2354 return -EINVAL;
2355
2356 ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2357 p_mngr->pf_start_line;
2358 *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2359 (tid % num_tids_per_block) * tid_size;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002360
2361 return 0;
2362}