/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

/* dma_addr_t manip */
#define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val)	do { \
					(x).hi = DMA_HI_LE((val)); \
					(x).lo = DMA_LO_LE((val)); \
				} while (0)

#define HILO_GEN(hi, lo, type)	((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo)	HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo)		HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
#define HILO_DMA_REGPAIR(regpair)	(HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair)	(HILO_64(regpair.hi, regpair.lo))

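/* Illustrative sketch (not part of the driver API): DMA_REGPAIR_LE() stores a
 * dma_addr_t as two little-endian 32-bit halves and HILO_64_REGPAIR() reads
 * the value back on the CPU side. The helper and variable names below are
 * hypothetical.
 *
 *      static inline u64 example_regpair_roundtrip(dma_addr_t addr)
 *      {
 *              struct regpair rp;
 *
 *              DMA_REGPAIR_LE(rp, addr);
 *              return HILO_64_REGPAIR(rp);
 *      }
 */
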
enum qed_chain_mode {
	/* Each page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; no next pointer is required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
	struct regpair	next_phys;
	void		*next_virt;
};

struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};

struct qed_chain_pbl {
	/* Base address of a pre-allocated buffer for pbl */
	dma_addr_t	p_phys_table;
	void		*p_virt_table;

	/* Table for keeping the virtual addresses of the chain pages,
	 * corresponding to the physical addresses in the pbl table.
	 */
	void **pp_virt_addr_tbl;

	/* Index of the page currently used by the producer/consumer */
	union {
		struct qed_chain_pbl_u16 pbl16;
		struct qed_chain_pbl_u32 pbl32;
	} u;
};

struct qed_chain_u16 {
	/* Cyclic index of the next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};

struct qed_chain_u32 {
	/* Cyclic index of the next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};

struct qed_chain {
	void		*p_virt_addr;
	dma_addr_t	p_phys_addr;
	void		*p_prod_elem;
	void		*p_cons_elem;

	enum qed_chain_mode	mode;
	enum qed_chain_use_mode	intended_use; /* used to produce/consume */
	enum qed_chain_cnt_type	cnt_type;

	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	u32 page_cnt;

	/* Number of elements - capacity counts only usable elements,
	 * while size is the total number of elements in the entire chain.
	 */
	u32 capacity;
	u32 size;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_unusable;
	u16 usable_per_page;
	u16 elem_size;
	u16 next_page_mask;
	struct qed_chain_pbl pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	\
	((mode == QED_CHAIN_MODE_NEXT_PTR) ?		\
	 (1 + ((sizeof(struct qed_chain_next) - 1) /	\
	       (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode)	\
	((u32)(ELEMS_PER_PAGE(elem_size) -	\
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

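/* Worked example (a sketch, assuming a 64-bit build where
 * sizeof(struct qed_chain_next) == 16): with 8-byte elements,
 * ELEMS_PER_PAGE(8) == 0x1000 / 8 == 512. In QED_CHAIN_MODE_NEXT_PTR the
 * trailing next-pointer reserves UNUSABLE_ELEMS_PER_PAGE(8, mode) ==
 * 1 + (16 - 1) / 8 == 2 elements, so USABLE_ELEMS_PER_PAGE(8, mode) == 510
 * and QED_CHAIN_PAGE_CNT(1024, 8, mode) == DIV_ROUND_UP(1024, 510) == 3 pages.
 */
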
#define is_chain_u16(p)	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p)	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
	return p_chain->u.chain32.cons_idx;
}

/* Number of free (producible) elements left in the chain, i.e. capacity minus
 * the elements that were produced but not yet consumed. The index difference
 * is computed modulo 2^16 (resp. 2^32) to handle wrap-around.
 */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	used = (u16)(((u32)0x10000 +
		      (u32)p_chain->u.chain16.prod_idx) -
		     (u32)p_chain->u.chain16.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
		    p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

	return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
	u32 used;

	used = (u32)(((u64)0x100000000ULL +
		      (u64)p_chain->u.chain32.prod_idx) -
		     (u64)p_chain->u.chain32.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
		    p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}

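/* Worked example (illustrative, 16-bit counters, QED_CHAIN_MODE_PBL): with
 * prod_idx == 5 and cons_idx == 0xfffe, the wrapped difference is
 * (0x10000 + 5) - 0xfffe == 7 elements in use, so qed_chain_get_elem_left()
 * returns capacity - 7.
 */
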
static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
	return p_chain->usable_per_page;
}

static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
	return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}

#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx)	\
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)				 \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)			 \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define test_and_skip(p, idx)						   \
	do {								   \
		if (is_chain_u16(p)) {					   \
			if (is_unusable_idx(p, idx))			   \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {						   \
			if (is_unusable_idx_u32(p, idx))		   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}							   \
	} while (0)

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}

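/* Usage sketch (illustrative only; "p_chain" is an already-initialized chain
 * and "struct example_bd" is a hypothetical element type): a producer first
 * checks that the chain has room, then fills the element returned by
 * qed_chain_produce().
 *
 *      struct example_bd *bd;
 *
 *      if (!qed_chain_get_elem_left(p_chain))
 *              return -EBUSY;
 *      bd = qed_chain_produce(p_chain);
 *      memset(bd, 0, sizeof(*bd));
 *      // fill the BD; the new producer index (qed_chain_get_prod_idx())
 *      // is then made visible to the consumer, e.g. via a FW doorbell
 */
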
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return u32, the maximum number of usable BDs (the chain's capacity)
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * increments the producer index so it can be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}

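/* Usage sketch (illustrative only; "p_chain", "hw_cons" and
 * "struct example_cqe" are assumptions): a completion-queue style consumer
 * typically compares a FW-updated index against the local consumer index and
 * drains the chain up to it, handing each slot back afterwards.
 *
 *      while (qed_chain_get_cons_idx(p_chain) != hw_cons) {
 *              struct example_cqe *cqe = qed_chain_consume(p_chain);
 *
 *              // handle *cqe, then return the slot to FW
 *              qed_chain_recycle_consumed(p_chain);
 *      }
 */
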
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
			p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
		/* Do nothing */
		break;

	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;
	}
}

/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt number of pages in the allocated buffer
 * @param elem_size size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = cnt_type;

	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	p_chain->pbl.p_phys_table = 0;
	p_chain->pbl.p_virt_table = NULL;
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}

/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr virtual address of allocated buffer's beginning
 * @param p_phys_addr physical address of allocated buffer's beginning
 *
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
				      void *p_virt_addr, dma_addr_t p_phys_addr)
{
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
}

/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl virtual address of a pre-allocated pbl table which will
 *        hold the physical addresses of the chain pages
 * @param p_phys_pbl physical (DMA) address of that pbl table
 * @param pp_virt_addr_tbl
 *        pointer to a pre-allocated side table which will hold
 *        the virtual addresses of the chain pages
 *
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
					  void *p_virt_pbl,
					  dma_addr_t p_phys_pbl,
					  void **pp_virt_addr_tbl)
{
	p_chain->pbl.p_phys_table = p_phys_pbl;
	p_chain->pbl.p_virt_table = p_virt_pbl;
	p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}

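/* Allocation sketch (illustrative only, not the driver's own allocation path;
 * the device pointer "dev" and the 8-byte element size are assumptions): a
 * single-page chain is set up by describing its geometry with
 * qed_chain_init_params() and then attaching one DMA-coherent page with
 * qed_chain_init_mem().
 *
 *      struct qed_chain chain;
 *      dma_addr_t phys;
 *      void *virt;
 *
 *      qed_chain_init_params(&chain, 1, 8,
 *                            QED_CHAIN_USE_TO_PRODUCE,
 *                            QED_CHAIN_MODE_SINGLE,
 *                            QED_CHAIN_CNT_TYPE_U16);
 *      virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, GFP_KERNEL);
 *      if (!virt)
 *              return -ENOMEM;
 *      qed_chain_init_mem(&chain, virt, phys);
 *      qed_chain_reset(&chain);
 */
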
/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr virtual address of a chain page of which the next
 *        pointer element is initialized
 * @param p_virt_next virtual address of the next chain page
 * @param p_phys_next physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
			     void *p_virt_curr,
			     void *p_virt_next, dma_addr_t p_phys_next)
{
	struct qed_chain_next *p_next;
	u32 size;

	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

	p_next->next_virt = p_virt_next;
}

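/* Linking sketch (illustrative only; "virt[]" and "phys[]" are hypothetical
 * arrays holding the pages of a QED_CHAIN_MODE_NEXT_PTR chain): each page's
 * trailing next-pointer element is pointed at the following page, and the
 * last page wraps back to the first so produce/consume can cycle through
 * the chain.
 *
 *      for (i = 0; i < page_cnt; i++) {
 *              u32 next = (i + 1) % page_cnt;
 *
 *              qed_chain_init_next_ptr_elem(&chain, virt[i],
 *                                           virt[next], phys[next]);
 *      }
 */
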
/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16)prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
	u32 i, page_cnt;

	if (p_chain->mode != QED_CHAIN_MODE_PBL)
		return;

	page_cnt = qed_chain_get_page_cnt(p_chain);

	for (i = 0; i < page_cnt; i++)
		memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
		       QED_CHAIN_PAGE_SIZE);
}

#endif