/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
	/* Each page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page, so a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
	struct regpair	next_phys;
	void		*next_virt;
};

struct qed_chain_pbl_u16 {
	u16	prod_page_idx;
	u16	cons_page_idx;
};

struct qed_chain_pbl_u32 {
	u32	prod_page_idx;
	u32	cons_page_idx;
};

struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16	prod_idx;
	u16	cons_idx;
};

struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32	prod_idx;
	u32	cons_idx;
};

struct qed_chain {
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */
	/* Pointers to the next element to produce/consume */
	void		*p_prod_elem;
	void		*p_cons_elem;

	/* Fastpath portions of the PBL [if one exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain pages,
		 * corresponding to the physical addresses in the pbl table.
		 */
		void	**pp_virt_addr_tbl;

		union {
			struct qed_chain_pbl_u16	u16;
			struct qed_chain_pbl_u32	u32;
		} c;
	} pbl;

	union {
		struct qed_chain_u16	chain16;
		struct qed_chain_u32	chain32;
	} u;

	/* Capacity counts only usable elements */
	u32	capacity;
	u32	page_cnt;

	enum qed_chain_mode	mode;

	/* Elements information for fast calculations */
	u16	elem_per_page;
	u16	elem_per_page_mask;
	u16	elem_size;
	u16	next_page_mask;
	u16	usable_per_page;
	u8	elem_unusable;

	u8	cnt_type;

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		dma_addr_t	p_phys_table;
		void		*p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void		*p_virt_addr;
	dma_addr_t	p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32	size;

	u8	intended_use;
};

#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) - \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

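/* Worked example (illustrative only): with 4 KB pages and 8-byte elements,
 * ELEMS_PER_PAGE(8) = 512. In QED_CHAIN_MODE_NEXT_PTR on a 64-bit build,
 * where sizeof(struct qed_chain_next) is 16, UNUSABLE_ELEMS_PER_PAGE(8, mode)
 * = 1 + (16 - 1) / 8 = 2, so USABLE_ELEMS_PER_PAGE(8, mode) = 510; a chain of
 * 1024 such elements therefore needs QED_CHAIN_PAGE_CNT(1024, 8, mode) =
 * DIV_ROUND_UP(1024, 510) = 3 pages.
 */
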
#define is_chain_u16(p)	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p)	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
	return p_chain->u.chain32.cons_idx;
}

static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	used = (u16)(((u32)0x10000 +
		      (u32)p_chain->u.chain16.prod_idx) -
		     (u32)p_chain->u.chain16.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
			p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

	return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
	u32 used;

	used = (u32)(((u64)0x100000000ULL +
		      (u64)p_chain->u.chain32.prod_idx) -
		     (u64)p_chain->u.chain32.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
			p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}

static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
	return p_chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
	return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}

#define is_unusable_idx(p, idx) \
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx) \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx) \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define test_and_skip(p, idx)						  \
	do {								  \
		if (is_chain_u16(p)) {					  \
			if (is_unusable_idx(p, idx))			  \
				(p)->u.chain16.idx += (p)->elem_unusable; \
		} else {						  \
			if (is_unusable_idx_u32(p, idx))		  \
				(p)->u.chain32.idx += (p)->elem_unusable; \
		}							  \
	} while (0)

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}

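/* Example (illustrative sketch, not part of the driver API): a producer loop
 * that posts up to 'count' elements after checking for room; 'struct my_elem'
 * and 'fill_elem()' are hypothetical placeholders.
 *
 *	u16 i, avail = qed_chain_get_elem_left(p_chain);
 *
 *	for (i = 0; i < count && i < avail; i++) {
 *		struct my_elem *elem = qed_chain_produce(p_chain);
 *
 *		fill_elem(elem, i);
 *	}
 */
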
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in the chain
 *
 * @param p_chain
 *
 * @return u32, the chain's capacity, i.e. the number of usable BDs
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments the producer so it can be written to the FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e. the FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}

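/* Example (illustrative sketch, not part of the driver API): an RX-style
 * consumer that walks 'done' buffers written by the FW and immediately
 * re-posts them to the producer side; 'done' and 'handle_buffer()' are
 * hypothetical placeholders, and the completion count would normally come
 * from a FW/HW indication.
 *
 *	while (done--) {
 *		void *buf = qed_chain_consume(p_chain);
 *
 *		handle_buffer(buf);
 *		qed_chain_recycle_consumed(p_chain);
 *	}
 */
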
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}

/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = (u8)intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = (u8)cnt_type;

	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	p_chain->pbl_sp.p_phys_table = 0;
	p_chain->pbl_sp.p_virt_table = NULL;
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}

/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 *
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
				      void *p_virt_addr, dma_addr_t p_phys_addr)
{
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
}

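/* Example (illustrative sketch, not part of the driver API): minimal setup of
 * a single-page, 16-bit chain. In the driver proper this is handled by the
 * chain allocation code; 'dev' and 'struct my_elem' are assumptions, and
 * dma_alloc_coherent() requires <linux/dma-mapping.h>.
 *
 *	struct qed_chain chain;
 *	dma_addr_t phys;
 *	void *virt;
 *
 *	qed_chain_init_params(&chain, 1, sizeof(struct my_elem),
 *			      QED_CHAIN_USE_TO_PRODUCE,
 *			      QED_CHAIN_MODE_SINGLE, QED_CHAIN_CNT_TYPE_U16);
 *
 *	virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, GFP_KERNEL);
 *	if (!virt)
 *		return -ENOMEM;
 *
 *	qed_chain_init_mem(&chain, virt, phys);
 *	qed_chain_reset(&chain);
 */
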
/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl	virtual address of a pre-allocated side table (the PBL)
 *			which will hold the physical addresses of the chain
 *			pages.
 * @param p_phys_pbl	physical address of that pre-allocated side table.
 * @param pp_virt_addr_tbl
 *			pointer to a pre-allocated side table which will hold
 *			the virtual addresses of the chain pages.
 *
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
					  void *p_virt_pbl,
					  dma_addr_t p_phys_pbl,
					  void **pp_virt_addr_tbl)
{
	p_chain->pbl_sp.p_phys_table = p_phys_pbl;
	p_chain->pbl_sp.p_virt_table = p_virt_pbl;
	p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}

/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr	virtual address of a chain page of which the next
 *			pointer element is initialized
 * @param p_virt_next	virtual address of the next chain page
 * @param p_phys_next	physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
			     void *p_virt_curr,
			     void *p_virt_next, dma_addr_t p_phys_next)
{
	struct qed_chain_next *p_next;
	u32 size;

	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

	p_next->next_virt = p_virt_next;
}

/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16)prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
	u32 i, page_cnt;

	if (p_chain->mode != QED_CHAIN_MODE_PBL)
		return;

	page_cnt = qed_chain_get_page_cnt(p_chain);

	for (i = 0; i < page_cnt; i++)
		memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
		       QED_CHAIN_PAGE_SIZE);
}

#endif