/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/list.h>
#include <linux/slab.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
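
/*
 * Worked example (illustrative, not part of the original header):
 * VXGE_ALIGN() returns the number of pad bytes needed to round @adrs up
 * to the next @size boundary, where @size must be a power of two:
 *
 *	VXGE_ALIGN(0x1005, 8) == 3	(0x1005 + 3 == 0x1008)
 *	VXGE_ALIGN(0x1008, 8) == 0	(already aligned)
 */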

#define VXGE_HW_MIN_MTU 68
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500

#define VXGE_HW_MAX_ROM_IMAGES 8

struct eprom_image {
	u8 is_valid:1;
	u8 index;
	u8 type;
	u16 version;
};

#ifdef VXGE_DEBUG_ASSERT
/**
 * vxge_assert
 * @test: C-condition to check
 *
 * This macro implements a traditional assert. Assertions are enabled by
 * default; they can be disabled by leaving the VXGE_DEBUG_ASSERT macro
 * undefined at compile time.
 */
#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */
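
/*
 * Minimal usage sketch (illustrative, not from the original source):
 *
 *	vxge_assert(hldev != NULL);
 *
 * With VXGE_DEBUG_ASSERT defined this expands to BUG_ON(!(hldev != NULL));
 * otherwise it compiles away entirely.
 */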

/**
 * enum vxge_debug_level
 * @VXGE_NONE: debug disabled
 * @VXGE_ERR: all errors are going to be logged out
 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
 * are going to be logged out. Very noisy.
 *
 * This enumeration is used to switch between different debug levels at
 * runtime if the DEBUG macro is defined during compilation. If the DEBUG
 * macro is not defined, the debug code is compiled out.
 */
enum vxge_debug_level {
	VXGE_NONE = 0,
	VXGE_TRACE = 1,
	VXGE_ERR = 2
};

#define NULL_VPID 0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK 0xffffffff
#define VXGE_DEBUG_TRACE_MASK 0xffffffff
#define VXGE_DEBUG_ERR_MASK 0xffffffff
#define VXGE_DEBUG_MASK 0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK 0x20000000
#define VXGE_DEBUG_TRACE_MASK 0x20000000
#define VXGE_DEBUG_ERR_MASK 0x20000000
#define VXGE_DEBUG_MASK 0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * This enumeration is used to distinguish modules or libraries during
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
 */
#define VXGE_COMPONENT_LL 0x20000000
#define VXGE_COMPONENT_ALL 0xffffffff

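/*
 * A minimal sketch (illustrative, not from the original source) of how the
 * component and trace masks are typically consumed: a hypothetical helper
 * that emits trace output only when the component's bit is set in
 * VXGE_DEBUG_TRACE_MASK.
 *
 *	#define vxge_trace(component, fmt, ...) \
 *		do { \
 *			if ((component) & VXGE_DEBUG_TRACE_MASK) \
 *				printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
 *		} while (0)
 *
 *	vxge_trace(VXGE_COMPONENT_LL, "vpath %d opened\n", vp_id);
 */
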
#define VXGE_HW_BASE_INF 100
#define VXGE_HW_BASE_ERR 200
#define VXGE_HW_BASE_BADCFG 300

enum vxge_hw_status {
	VXGE_HW_OK = 0,
	VXGE_HW_FAIL = 1,
	VXGE_HW_PENDING = 2,
	VXGE_HW_COMPLETIONS_REMAIN = 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,

	VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,

	VXGE_HW_EOF_TRACE_BUF = -1
};

/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/**
 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
 * @VXGE_HW_FW_UPGRADE_ERR: upload error
 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
 *
 */
enum vxge_hw_fw_upgrade_code {
	VXGE_HW_FW_UPGRADE_OK = 0,
	VXGE_HW_FW_UPGRADE_DONE = 1,
	VXGE_HW_FW_UPGRADE_ERR = 2,
	VXGE_FW_UPGRADE_BYTES2SKIP = 3
};

/**
 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image
 * (image check failed)
 */
enum vxge_hw_fw_upgrade_err_code {
	VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
	VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
	VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
	VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
	VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
};

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN 32
struct vxge_hw_device_date {
	u32 day;
	u32 month;
	u32 year;
	char date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32 major;
	u32 minor;
	u32 build;
	char version[VXGE_HW_FW_STRLEN];
};

/**
 * struct vxge_hw_fifo_config - Configuration of fifo.
 * @enable: Is this fifo to be commissioned
 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
 * blocks per queue.
 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
 * transmit operation).
 * No more than 256 transmit buffers can be specified.
 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
 * bytes. Setting @memblock_size to page size ensures
 * by-page allocation of descriptors. 128K bytes is the
 * maximum supported block size.
 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
 * (e.g., to align on a cache line).
 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
 * Use 0 otherwise.
 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
 * which generally improves latency of the host bridge operation
 * (see PCI specification). For valid values please refer
 * to struct vxge_hw_fifo_config{} in the driver sources.
 * Configuration of all Titan fifos.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_fifo_config{} structure.
 */
struct vxge_hw_fifo_config {
	u32 enable;
#define VXGE_HW_FIFO_ENABLE 1
#define VXGE_HW_FIFO_DISABLE 0

	u32 fifo_blocks;
#define VXGE_HW_MIN_FIFO_BLOCKS 2
#define VXGE_HW_MAX_FIFO_BLOCKS 128

	u32 max_frags;
#define VXGE_HW_MIN_FIFO_FRAGS 1
#define VXGE_HW_MAX_FIFO_FRAGS 256

	u32 memblock_size;
#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096

	u32 alignment_size;
#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE

	u32 intr;
#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0

	u32 no_snoop_bits;
#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0

};
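
/*
 * A minimal configuration sketch (illustrative, not from the original
 * source): populating a fifo configuration from the defaults defined above
 * before handing it to device initialization.
 *
 *	struct vxge_hw_fifo_config fifo_cfg = {
 *		.enable		= VXGE_HW_FIFO_ENABLE,
 *		.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
 *		.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
 *		.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
 *		.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
 *		.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
 *		.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
 *	};
 */
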
/**
 * struct vxge_hw_ring_config - Ring configurations.
 * @enable: Is this ring to be commissioned
 * @ring_blocks: Numbers of RxD blocks in the ring
 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
 * to Titan User Guide.
 * @scatter_mode: Titan supports two receive scatter modes: A and B.
 * For details please refer to Titan User Guide.
 * @rx_timer_val: The number of 32ns periods that would be counted between two
 * timer interrupts.
 * @greedy_return: If set, it forces the device to return absolutely all RxDs
 * that are consumed and still on board when a timer interrupt
 * triggers. If clear, then if the device has already returned
 * RxDs before the current timer interrupt triggered and after
 * the previous timer interrupt triggered, the device is not
 * forced to return the rest of the consumed RxDs that it has
 * on board, which account for a byte count less than the one
 * programmed into the PRC_CFG6.RXD_CRXDT field
 * @rx_timer_ci: TBD
 * @backoff_interval_us: Time (in microseconds), after which Titan
 * tries to download RxDs posted by the host.
 * Note that the "backoff" does not happen if the host posts receive
 * descriptors in a timely fashion.
 * Ring configuration.
 */
struct vxge_hw_ring_config {
	u32 enable;
#define VXGE_HW_RING_ENABLE 1
#define VXGE_HW_RING_DISABLE 0
#define VXGE_HW_RING_DEFAULT 1

	u32 ring_blocks;
#define VXGE_HW_MIN_RING_BLOCKS 1
#define VXGE_HW_MAX_RING_BLOCKS 128
#define VXGE_HW_DEF_RING_BLOCKS 2

	u32 buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1

	u32 scatter_mode;
#define VXGE_HW_RING_SCATTER_MODE_A 0
#define VXGE_HW_RING_SCATTER_MODE_B 1
#define VXGE_HW_RING_SCATTER_MODE_C 2
#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff

	u64 rxds_limit;
#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
};

/**
 * struct vxge_hw_vp_config - Configuration of virtual path
 * @vp_id: Virtual Path Id
 * @min_bandwidth: Minimum Guaranteed bandwidth
 * @ring: See struct vxge_hw_ring_config{}.
 * @fifo: See struct vxge_hw_fifo_config{}.
 * @tti: Configuration of interrupt associated with Transmit.
 * see struct vxge_hw_tim_intr_config();
 * @rti: Configuration of interrupt associated with Receive.
 * see struct vxge_hw_tim_intr_config();
 * @mtu: MTU size used on this port.
 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
 * remove the VLAN tag from all received tagged frames that are not
 * replicated at the internal L2 switch.
 * 0 - Do not strip the VLAN tag.
 * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
 * always placed into the RxDMA descriptor.
 *
 * This structure is used by the driver to pass the configuration parameters to
 * configure Virtual Path.
 */
struct vxge_hw_vp_config {
	u32 vp_id;

#define VXGE_HW_VPATH_PRIORITY_MIN 0
#define VXGE_HW_VPATH_PRIORITY_MAX 16
#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0

	u32 min_bandwidth;
#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0

	struct vxge_hw_ring_config ring;
	struct vxge_hw_fifo_config fifo;
	struct vxge_hw_tim_intr_config tti;
	struct vxge_hw_tim_intr_config rti;

	u32 mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff

	u32 rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff

};
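
/*
 * A minimal per-vpath sketch (illustrative, not from the original source):
 * commission the ring and fifo on one virtual path and take the remaining
 * attributes from the flash defaults.
 *
 *	struct vxge_hw_vp_config vp_cfg = {
 *		.vp_id = 0,
 *		.min_bandwidth = VXGE_HW_VPATH_BANDWIDTH_DEFAULT,
 *		.mtu = VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU,
 *		.rpa_strip_vlan_tag =
 *			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT,
 *	};
 *
 *	vp_cfg.ring.enable = VXGE_HW_RING_ENABLE;
 *	vp_cfg.fifo.enable = VXGE_HW_FIFO_ENABLE;
 */
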
/**
 * struct vxge_hw_device_config - Device configuration.
 * @dma_blockpool_initial: Initial size of DMA Pool
 * @dma_blockpool_max: Maximum blocks in DMA pool
 * @intr_mode: Line, or MSI-X interrupt.
 *
 * @rth_en: Enable Receive Traffic Hashing (RTH) using IT (Indirection Table).
 * @rth_it_type: RTH IT table programming type
 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
 * @vp_config: Configuration for virtual paths
 * @device_poll_millis: Specify the interval (in milliseconds)
 * to wait for register reads
 *
 * Titan configuration.
 * Contains per-device configuration parameters, including:
 * - stats sampling interval, etc.
 *
 * In addition, struct vxge_hw_device_config{} includes "subordinate"
 * configurations, including:
 * - fifos and rings;
 * - MAC (done at firmware level).
 *
 * See Titan User Guide for more details.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_device_config{} structure. Please refer to the
 * corresponding include file.
 * See also: struct vxge_hw_tim_intr_config{}.
 */
struct vxge_hw_device_config {
	u32 device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000

	u32 dma_blockpool_initial;
	u32 dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096

#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2

	u32 intr_mode:2,
#define VXGE_HW_INTR_MODE_IRQLINE 0
#define VXGE_HW_INTR_MODE_MSIX 1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2

#define VXGE_HW_INTR_MODE_DEF 0

	    rth_en:1,
#define VXGE_HW_RTH_DISABLE 0
#define VXGE_HW_RTH_ENABLE 1
#define VXGE_HW_RTH_DEFAULT 0

	    rth_it_type:1,
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0

	    rts_mac_en:1,
#define VXGE_HW_RTS_MAC_DISABLE 0
#define VXGE_HW_RTS_MAC_ENABLE 1
#define VXGE_HW_RTS_MAC_DEFAULT 0

	    hwts_en:1;
#define VXGE_HW_HWTS_DISABLE 0
#define VXGE_HW_HWTS_ENABLE 1
#define VXGE_HW_HWTS_DEFAULT 1

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
};
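
/*
 * A minimal device-level sketch (illustrative, not from the original
 * source): the interrupt mode and the RTH/RTS bitfields are typically
 * filled in from the defaults above before the device is initialized.
 *
 *	struct vxge_hw_device_config dev_cfg;
 *
 *	memset(&dev_cfg, 0, sizeof(dev_cfg));
 *	dev_cfg.device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
 *	dev_cfg.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *	dev_cfg.rth_en = VXGE_HW_RTH_ENABLE;
 *	dev_cfg.rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
 */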

/**
 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
 * @devh: HW device handle.
 * Link-up notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_link_down_f - Link-Down callback provided by
 * driver.
 * @devh: HW device handle.
 *
 * Link-Down notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_crit_err_f - Critical Error notification callback.
 * @devh: HW device handle.
 * (typically - at HW device initialization time).
 * @type: Enumerated hw error, e.g.: double ECC.
 * @serr_data: Titan status.
 * @ext_data: Extended data. The contents depends on the @type.
 *
 * Critical-error notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
 * vxge_hw_driver_initialize().
 */

/**
 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
 * @link_up: See vxge_uld_link_up_f{}.
 * @link_down: See vxge_uld_link_down_f{}.
 * @crit_err: See vxge_uld_crit_err_f{}.
 *
 * Driver slow-path (per-driver) callbacks.
 * Implemented by driver and provided to HW via
 * vxge_hw_driver_initialize().
 * Note that these callbacks are not mandatory: HW will not invoke
 * a callback if NULL is specified.
 *
 * See also: vxge_hw_driver_initialize().
 */
struct vxge_hw_uld_cbs {
	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};
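
/*
 * A minimal sketch (illustrative, not from the original source) of a driver
 * wiring up the slow-path callbacks; the handler name is hypothetical.
 *
 *	static void my_link_up(struct __vxge_hw_device *devh)
 *	{
 *		pr_info("vxge: link up\n");
 *	}
 *
 *	struct vxge_hw_uld_cbs cbs = {
 *		.link_up   = my_link_up,
 *		.link_down = NULL,
 *		.crit_err  = NULL,
 *	};
 *
 * Leaving a callback NULL is legal; HW simply does not invoke it.
 */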

/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
 * @acc_handle: DMA acc handle
 *
 * The block is allocated with a header so that the blocks can be linked
 * into a list.
 *
 */
struct __vxge_hw_blockpool_entry {
	struct list_head item;
	u32 length;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
};

/*
 * struct __vxge_hw_blockpool - Block Pool
 * @hldev: HW device
 * @block_size: size of each block.
 * @pool_size: Number of blocks in the pool
 * @pool_max: Maximum number of blocks above which to free additional blocks
 * @req_out: Number of block requests outstanding with the OS
 * @free_block_list: List of free blocks
 *
 * Block pool contains the preallocated DMA blocks.
 *
 */
struct __vxge_hw_blockpool {
	struct __vxge_hw_device *hldev;
	u32 block_size;
	u32 pool_size;
	u32 pool_max;
	u32 req_out;
	struct list_head free_block_list;
	struct list_head free_entry_list;
};

/*
 * enum __vxge_hw_channel_type - Enumerated channel types.
 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
 * (and recognized) channel types. Currently: 2.
 *
 * Enumerated channel types. Currently there are only two link-layer
 * channels - Titan fifo and Titan ring. In the future the list will grow.
 */
enum __vxge_hw_channel_type {
	VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
	VXGE_HW_CHANNEL_TYPE_FIFO = 1,
	VXGE_HW_CHANNEL_TYPE_RING = 2,
	VXGE_HW_CHANNEL_TYPE_MAX = 3
};

/*
 * struct __vxge_hw_channel
 * @item: List item; used to maintain a list of open channels.
 * @type: Channel type. See enum vxge_hw_channel_type{}.
 * @devh: Device handle. HW device object that contains _this_ channel.
 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
 * @length: Channel length. Currently allocated number of descriptors.
 * The channel length "grows" when more descriptors get allocated.
 * See _hw_mempool_grow.
 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
 * by driver for the subsequent send or receive operation.
 * See vxge_hw_fifo_txdl_reserve(),
 * vxge_hw_ring_rxd_reserve().
 * @reserve_ptr: Current pointer in the reserve array
 * @reserve_top: Reserve top gives the maximum number of dtrs available in
 * the reserve array.
 * @work_arr: Work array. Contains descriptors posted to the channel.
 * Note that at any point in time @work_arr contains 3 types of
 * descriptors:
 * 1) posted but not yet consumed by Titan device;
 * 2) consumed but not yet completed;
 * 3) completed but not yet freed
 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
 * @post_index: Post index. At any point in time points to the
 * position in the channel which will contain the next
 * to-be-posted descriptor.
 * @compl_index: Completion index. At any point in time points to the
 * position in the channel which will contain the next
 * to-be-completed descriptor.
 * @free_arr: Free array. Contains completed descriptors that were freed
 * (i.e., handed over back to HW) by driver.
 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
 * @free_ptr: current pointer in free array
 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
 * to store per-operation control information.
 * @stats: Pointer to common statistics
 * @userdata: Per-channel opaque (void*) user-defined context, which may be
 * driver object, ULP connection, etc.
 * Once channel is open, @userdata is passed back to user via
 * vxge_hw_channel_callback_f.
 *
 * HW channel object.
 *
 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
 */
struct __vxge_hw_channel {
	struct list_head item;
	enum __vxge_hw_channel_type type;
	struct __vxge_hw_device *devh;
	struct __vxge_hw_vpath_handle *vph;
	u32 length;
	u32 vp_id;
	void **reserve_arr;
	u32 reserve_ptr;
	u32 reserve_top;
	void **work_arr;
	u32 post_index ____cacheline_aligned;
	u32 compl_index ____cacheline_aligned;
	void **free_arr;
	u32 free_ptr;
	void **orig_arr;
	u32 per_dtr_space;
	void *userdata;
	struct vxge_hw_common_reg __iomem *common_reg;
	u32 first_vp_id;
	struct vxge_hw_vpath_stats_sw_common_info *stats;

} ____cacheline_aligned;

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * @vp_id: Virtual path id
 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
 * @hldev: HAL device
 * @vp_config: Virtual Path Config
 * @vp_reg: VPATH Register map address in BAR0
 * @vpmgmt_reg: VPATH_MGMT register map address
 * @max_mtu: Max MTU that can be supported
 * @vsport_number: vsport attached to this vpath
 * @max_kdfc_db: Maximum kernel mode doorbells
 * @max_nofl_db: Maximum non offload doorbells
 * @tx_intr_num: Interrupt Number associated with the TX
 *
 * @ringh: Ring Queue
 * @fifoh: FIFO Queue
 * @vpath_handles: Virtual Path handles list
 * @stats_block: Memory for DMAing stats
 * @stats: Vpath statistics
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32 vp_id;

	u32 vp_open;
#define VXGE_HW_VP_NOT_OPEN 0
#define VXGE_HW_VP_OPEN 1

	struct __vxge_hw_device *hldev;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;

	u32 max_mtu;
	u32 vsport_number;
	u32 max_kdfc_db;
	u32 max_nofl_db;
	u64 tim_tti_cfg1_saved;
	u64 tim_tti_cfg3_saved;
	u64 tim_rti_cfg1_saved;
	u64 tim_rti_cfg3_saved;

	struct __vxge_hw_ring *____cacheline_aligned ringh;
	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
	struct list_head vpath_handles;
	struct __vxge_hw_blockpool_entry *stats_block;
	struct vxge_hw_vpath_stats_hw_info *hw_stats;
	struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	spinlock_t lock;
};

/*
 * struct __vxge_hw_vpath_handle - List item to store callback information
 * @item: List head to keep the item in linked list
 * @vpath: Virtual path to which this item belongs
 *
 * This structure is used to store the callback information.
 */
struct __vxge_hw_vpath_handle {
	struct list_head item;
	struct __vxge_hw_virtualpath *vpath;
};

/*
 * struct __vxge_hw_device
 *
 * HW device object.
 */
/**
 * struct __vxge_hw_device - HAL device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
	u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
	void __iomem *bar0;
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct vxge_hw_device_config config;
	enum vxge_hw_device_link_state link_state;

	struct vxge_hw_uld_cbs uld_callbacks;

	u32 host_type;
	u32 func_id;
	u32 access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
	struct vxge_hw_legacy_reg __iomem *legacy_reg;
	struct vxge_hw_toc_reg __iomem *toc_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
				[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
				[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg __iomem *vpath_reg \
				[VXGE_HW_TITAN_VPATH_REG_SPACES];
	u8 __iomem *kdfc;
	u8 __iomem *usdc;
	struct __vxge_hw_virtualpath virtual_paths \
				[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64 vpath_assignments;
	u64 vpaths_deployed;
	u32 first_vp_id;
	u64 tim_int_mask0[4];
	u32 tim_int_mask1[4];

	struct __vxge_hw_blockpool block_pool;
	struct vxge_hw_device_stats stats;
	u32 debug_module_mask;
	u32 debug_level;
	u32 level_err;
	u32 level_trace;
	u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
};

#define VXGE_HW_INFO_LEN 64
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware date
 * @flash_version: Flash version
 * @flash_date: Flash date
 * @mac_addrs: MAC addresses for each vpath
 * @mac_addr_masks: MAC address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first MAC address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32 host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
#define VXGE_HW_SR_VH_FUNCTION0 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
	u64 function_mode;
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10

	u32 func_id;
	u64 vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date flash_date;
	u8 serial_number[VXGE_HW_INFO_LEN];
	u8 part_number[VXGE_HW_INFO_LEN];
	u8 product_desc[VXGE_HW_INFO_LEN];
	u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

/**
 * struct vxge_hw_device_attr - Device memory spaces.
 * @bar0: BAR0 virtual address.
 * @pdev: PCI device object.
 *
 * Device memory spaces. Includes configuration, BAR0 etc. per device
 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
 */
struct vxge_hw_device_attr {
	void __iomem *bar0;
	struct pci_dev *pdev;
	struct vxge_hw_uld_cbs uld_callbacks;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
	if (i < 16) { \
		m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
		m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
	} \
	else { \
		m1[0] = 0x80000000; \
		m1[1] = 0x40000000; \
	} \
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
	if (i < 16) { \
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
	} \
	else { \
		m1[0] = 0; \
		m1[1] = 0; \
	} \
}

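/*
 * Worked example (illustrative, not from the original source): for vpath
 * i = 3, VXGE_HW_DEVICE_TIM_INT_MASK_SET() ORs the patterns 0x8 and 0x4
 * into the 4-bit field at offset i*4 = 12 of m0[0] and m0[1] respectively
 * (assuming vxge_vBIT() places a value at an MSB-relative bit offset, as
 * elsewhere in this driver); vpath ids of 16 and above fall back to the
 * fixed m1[] masks.
 *
 *	u64 m0[4] = {0};
 *	u32 m1[4] = {0};
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, 3);
 */
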
#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
	status = vxge_hw_mrpcim_stats_access(hldev, \
				VXGE_HW_STATS_OP_READ, \
				loc, \
				offset, \
				&val64); \
	if (status != VXGE_HW_OK) \
		return status; \
}

/*
 * struct __vxge_hw_ring - Ring channel.
 * @channel: Channel "base" of this ring, the common part of all HW
 * channels.
 * @mempool: Memory pool, the pool from which descriptors get allocated.
 * (See vxge_hw_mm.h).
 * @config: Ring configuration, part of device configuration
 * (see struct vxge_hw_device_config{}).
 * @ring_length: Length of the ring
 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
 * as per Titan User Guide.
 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
 * 1-buffer mode descriptor is 32 byte long, etc.
 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
 * per-descriptor data (e.g., DMA handle for Solaris)
 * @per_rxd_space: Per rxd space requested by driver
 * @rxds_per_block: Number of descriptors per hardware-defined RxD
 * block. Depends on the (1-, 3-, 5-) buffer mode.
 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
 * usage. Not to be confused with @rxd_priv_size.
 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
 * @callback: Channel completion callback. HW invokes the callback when there
 * are new completions on that channel. In many implementations
 * the @callback executes in the hw interrupt context.
 * @rxd_init: Channel's descriptor-initialize callback.
 * See vxge_hw_ring_rxd_init_f{}.
 * If not NULL, HW invokes the callback when opening
 * the ring.
 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding channel.
 * See also vxge_hw_channel_rxd_term_f{}.
 * @stats: Statistics for ring
 * Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 * CPU cache performance.
 */
struct __vxge_hw_ring {
	struct __vxge_hw_channel channel;
	struct vxge_hw_mempool *mempool;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	u32 ring_length;
	u32 buffer_mode;
	u32 rxd_size;
	u32 rxd_priv_size;
	u32 per_rxd_space;
	u32 rxds_per_block;
	u32 rxdblock_priv_size;
	u32 cmpl_cnt;
	u32 vp_id;
	u32 doorbell_cnt;
	u32 total_db_cnt;
	u64 rxds_limit;
	u32 rtimer;
	u64 tim_rti_cfg1_saved;
	u64 tim_rti_cfg3_saved;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
	struct vxge_hw_ring_config *config;
} ____cacheline_aligned;

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE = 0,
	VXGE_HW_TXDL_STATE_AVAIL = 1,
	VXGE_HW_TXDL_STATE_POSTED = 2,
	VXGE_HW_TXDL_STATE_FREED = 3
};
/*
 * struct __vxge_hw_fifo - Fifo.
 * @channel: Channel "base" of this fifo, the common part of all HW
 * channels.
 * @mempool: Memory pool, from which descriptors get allocated.
 * @config: Fifo configuration, part of device configuration
 * (see struct vxge_hw_device_config{}).
 * @interrupt_type: Interrupt type to be used
 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 * For more details on TxDL please refer to Titan UG.
 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
 * @priv_size: Per-Tx descriptor space reserved for driver
 * usage.
 * @per_txdl_space: Per txdl private space for the driver
 * @callback: Fifo completion callback. HW invokes the callback when there
 * are new completions on that fifo. In many implementations
 * the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding fifo.
 * See also vxge_hw_fifo_txdl_term_f{}.
 * @stats: Statistics of this fifo
 *
 * Fifo channel.
 * Note: The structure is cache line aligned.
 */
struct __vxge_hw_fifo {
	struct __vxge_hw_channel channel;
	struct vxge_hw_mempool *mempool;
	struct vxge_hw_fifo_config *config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
	u64 interrupt_type;
	u32 no_snoop_bits;
	u32 txdl_per_memblock;
	u32 txdl_size;
	u32 priv_size;
	u32 per_txdl_space;
	u32 vp_id;
	u32 tx_intr_num;
	u32 rtimer;
	u64 tim_tti_cfg1_saved;
	u64 tim_tti_cfg3_saved;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb,
			int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
} ____cacheline_aligned;

/*
 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
 * @dma_addr: DMA (mapped) address of _this_ descriptor.
 * @dma_handle: DMA handle used to map the descriptor onto device.
 * @dma_offset: Descriptor's offset in the memory block. HW allocates
 * descriptors in memory blocks (see struct vxge_hw_fifo_config{}).
 * Each memblock is a contiguous block of DMA-able memory.
 * @frags: Total number of fragments (that is, contiguous data buffers)
 * carried by this TxDL.
 * @align_vaddr_start: Aligned virtual address start
 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
 * alignment. Used to place one or more mis-aligned fragments
 * @align_dma_addr: DMA address translated from the @align_vaddr.
 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
 * @align_dma_offset: The current offset into the @align_vaddr area.
 * Grows while filling the descriptor, gets reset.
 * @align_used_frags: Number of fragments used.
 * @alloc_frags: Total number of fragments allocated.
 * @unused: TODO
 * @next_txdl_priv: (TODO).
 * @first_txdp: (TODO).
 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
 * TxDL list.
 * @txdlh: Corresponding txdlh to this TxDL.
 * @memblock: Pointer to the TxDL memory block or memory page.
 * @dma_object: DMA address and handle of the memory block that contains
 * the descriptor. This member is used only in the "checked"
 * version of the HW (to enforce certain assertions);
 * otherwise it gets compiled out.
 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
 *
 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
 * information associated with the descriptor. Note that driver can ask HW
 * to allocate additional per-descriptor space for its own (driver-specific)
 * purposes.
 *
 * See also: struct vxge_hw_ring_rxd_priv{}.
 */
struct __vxge_hw_fifo_txdl_priv {
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	ptrdiff_t dma_offset;
	u32 frags;
	u8 *align_vaddr_start;
	u8 *align_vaddr;
	dma_addr_t align_dma_addr;
	struct pci_dev *align_dma_handle;
	struct pci_dev *align_dma_acch;
	ptrdiff_t align_dma_offset;
	u32 align_used_frags;
	u32 alloc_frags;
	u32 unused;
	struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
	struct vxge_hw_fifo_txd *first_txdp;
	void *memblock;
};

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 * Bits 8 to 31 - Reserved.
 * Bits 32 to 39 - The highest TxD in this TxDL.
 * Bits 40 to 47 - Reserved.
 * Bits 48 to 55 - Reserved.
 * Bits 56 to 63 - No snoop flags.
 * @txdl_ptr: The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64 control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW 0

#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1

	u64 txdl_ptr;
};

/*
 * TX Descriptor
 */

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 * @control_0: Bits 0 to 6 - Reserved.
 * Bit 7 - List Ownership. This field should be initialized
 * to '1' by the driver before the transmit list pointer is
 * written to the adapter. This field will be set to '0' by the
 * adapter once it has completed transmitting the frame or frames in
 * the list. Note - This field is only valid in TxD0. Additionally,
 * for multi-list sequences, the driver should not release any
 * buffers until the ownership of the last list in the multi-list
 * sequence has been returned to the host.
 * Bits 8 to 11 - Reserved
 * Bits 12 to 15 - Transfer_Code. This field is only valid in
 * TxD0. It is used to describe the status of the transmit data
 * buffer transfer. This field is always overwritten by the
 * adapter, so this field may be initialized to any value.
 * Bits 16 to 17 - Host steering. This field allows the host to
 * override the selection of the physical transmit port.
 * Attention:
 * Normal sounds as if learned from the switch rather than from
 * the aggregation algorithms.
 * 00: Normal. Use Destination/MAC Address
 * lookup to determine the transmit port.
 * 01: Send on physical Port1.
 * 10: Send on physical Port0.
 * 11: Send on both ports.
 * Bits 18 to 21 - Reserved
 * Bits 22 to 23 - Gather_Code. This field is set by the host and
 * is used to describe how individual buffers comprise a frame.
 * 10: First descriptor of a frame.
 * 00: Middle of a multi-descriptor frame.
 * 01: Last descriptor of a frame.
 * 11: First and last descriptor of a frame (the entire frame
 * resides in a single buffer).
 * For multi-descriptor frames, the only valid gather code sequence
 * is {10, [00], 01}. In other words, the descriptors must be placed
 * in the list in the correct order.
 * Bits 24 to 27 - Reserved
 * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
 * definition. Only valid in TxD0. This field allows the host to
 * indicate the Ethernet encapsulation of an outbound LSO packet.
 * 00 - classic mode (best guess)
 * 01 - LLC
 * 10 - SNAP
 * 11 - DIX
 * If "classic mode" is selected, the adapter will attempt to
 * decode the frame's Ethernet encapsulation by examining the L/T
 * field as follows:
 * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
 * if packet is IPv4 or IPv6.
 * 0x8870 Jumbo-SNAP encoding.
 * 0x0800 IPv4 DIX encoding
 * 0x86DD IPv6 DIX encoding
 * others illegal encapsulation
 * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
 * Set to 1 to perform segmentation offload for TCP/UDP.
 * This field is valid only in TxD0.
 * Bits 31 to 33 - Reserved.
 * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
 * This field is meaningful only when LSO_Control is non-zero.
 * When LSO_Control is set to TCP_LSO, the single (possibly large)
 * TCP segment described by this TxDL will be sent as a series of
 * TCP segments each of which contains no more than LSO_MSS
 * payload bytes.
 * When LSO_Control is set to UDP_LSO, the single (possibly large)
 * UDP datagram described by this TxDL will be sent as a series of
 * UDP datagrams each of which contains no more than LSO_MSS
 * payload bytes.
 * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
 * or TCP payload, with the exception of the last, which will have
 * <= LSO_MSS bytes of payload.
 * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
 * buffer to be read by the adapter. This field is written by the
 * host. A value of 0 is illegal.
 * Bits 32 to 63 - This value is written by the adapter upon
 * completion of a UDP or TCP LSO operation and indicates the number
 * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
 * returned for any non-LSO operation.
 * @control_1: Bits 0 to 4 - Reserved.
 * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
 * offload. This field is only valid in the first TxD of a frame.
 * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
 * This field is only valid in the first TxD of a frame (the TxD's
 * gather code must be 10 or 11). The driver should only set this
 * bit if it can guarantee that TCP is present.
 * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
 * This field is only valid in the first TxD of a frame (the TxD's
 * gather code must be 10 or 11). The driver should only set this
 * bit if it can guarantee that UDP is present.
 * Bits 8 to 14 - Reserved.
 * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
 * instruct the adapter to insert the VLAN tag specified by the
 * Tx_VLAN_Tag field. This field is only valid in the first TxD of
 * a frame.
 * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
 * to be inserted into the frame by the adapter (the first two bytes
 * of a VLAN tag are always 0x8100). This field is only valid if the
 * Tx_VLAN_Enable field is set to '1'.
 * Bits 32 to 33 - Reserved.
 * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
 * number the frame is associated with. This field is written by the
 * host. It is only valid in the first TxD of a frame.
 * Bits 40 to 42 - Reserved.
 * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
 * functions. This field is valid only in the first TxD
 * of a frame.
 * Bits 44 to 45 - Reserved.
 * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
 * generate an interrupt as soon as all of the frames in the list
 * have been transmitted. In order to have per-frame interrupts,
 * the driver should place a maximum of one frame per list. This
 * field is only valid in the first TxD of a frame.
 * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
 * to count the frame toward the utilization interrupt specified in
 * the Tx_Int_Number field. This field is only valid in the first
 * TxD of a frame.
 * Bits 48 to 63 - Reserved.
 * @buffer_pointer: Buffer start address.
 * @host_control: Host_Control. Opaque 64-bit data stored by driver inside the
 * Titan descriptor prior to posting the latter on the fifo
 * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
 * to the driver with each completed descriptor.
 *
 * Transmit descriptor (TxD). Fifo descriptor contains configured number
 * (list) of TxDs. For more details please refer to Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)

#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)

#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};
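
/*
 * A minimal fill sketch (illustrative, not from the original source) for a
 * single-buffer, non-LSO frame occupying one TxD; @dma and @len are assumed
 * to come from a prior DMA mapping, and the gather-code value is assumed
 * from the companion vxge-traffic.h.
 *
 *	struct vxge_hw_fifo_txd *txdp = txdlh;
 *
 *	txdp->buffer_pointer = dma;
 *	txdp->control_0 =
 *		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST) |
 *		VXGE_HW_FIFO_TXD_BUFFER_SIZE(len);
 *	txdp->control_1 = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
 *
 * The ownership bit is set by the post operation when the list is handed
 * to the adapter.
 */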
1294
1295/**
1296 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
1297 * @host_control: This field is exclusively for host use and is "readonly"
1298 * from the adapter's perspective.
1299 * @control_0:Bits 0 to 6 - RTH_Bucket get
1300 * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
1301 * by the host, and is set to 0 by the adapter.
1302 * 0 - Host owns RxD and buffer.
1303 * 1 - The adapter owns RxD and buffer.
1304 * Bit 8 - Fast_Path_Eligible When set, indicates that the
1305 * received frame meets all of the criteria for fast path processing.
1306 * The required criteria are as follows:
1307 * !SYN &
1308 * (Transfer_Code == "Transfer OK") &
1309 * (!Is_IP_Fragment) &
1310 * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
1311 * (Is_IPv6)) &
1312 * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
1313 * (Is_UDP & (computed_L4_checksum == 0xFFFF |
1314 * computed _L4_checksum == 0x0000)))
1315 * (same meaning for all RxD buffer modes)
1316 * Bit 9 - L3 Checksum Correct
1317 * Bit 10 - L4 Checksum Correct
1318 * Bit 11 - Reserved
1319 * Bit 12 to 15 - This field is written by the adapter. It is
1320 * used to report the status of the frame transfer to the host.
1321 * 0x0 - Transfer OK
1322 * 0x4 - RDA Failure During Transfer
1323 * 0x5 - Unparseable Packet, such as unknown IPv6 header.
1324 * 0x6 - Frame integrity error (FCS or ECC).
1325 * 0x7 - Buffer Size Error. The provided buffer(s) were not
1326 * appropriately sized and data loss occurred.
1327 * 0x8 - Internal ECC Error. RxD corrupted.
1328 * 0x9 - IPv4 Checksum error
1329 * 0xA - TCP/UDP Checksum error
1330 * 0xF - Unknown Error or Multiple Error. Indicates an
1331 * unknown problem or that more than one of transfer codes is set.
1332 * Bit 16 - SYN The adapter sets this field to indicate that
1333 * the incoming frame contained a TCP segment with its SYN bit
1334 * set and its ACK bit NOT set. (same meaning for all RxD buffer
1335 * modes)
1336 * Bit 17 - Is ICMP
1337 * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
1338 * Socket Pair Direct Match Table and the frame was steered based
1339 * on SPDM.
1340 * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
1341 * Indirection Table and the frame was steered based on hash
1342 * indirection.
1343 * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
1344 * type) that was used to calculate the hash.
1345 * Bit 19 - IS_VLAN Set to '1' if the frame was/is VLAN
1346 * tagged.
1347 * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
1348 * of the received frame.
1349 * 0x0 - Ethernet DIX
1350 * 0x1 - LLC
1351 * 0x2 - SNAP (includes Jumbo-SNAP)
1352 * 0x3 - IPX
1353 * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
1354 * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
1355 * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
1356 * IP packet.
1357 * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
1358 * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
1359 * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
1360 * arrived with the frame. If the resulting computed IPv4 header
1361 * checksum for the frame did not produce the expected 0xFFFF value,
1362 * then the transfer code would be set to 0x9.
1363 * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
1364 * arrived with the frame. If the resulting computed TCP/UDP checksum
1365 * for the frame did not produce the expected 0xFFFF value, then the
1366 * transfer code would be set to 0xA.
1367 * @control_1:Bits 0 to 1 - Reserved
1368 * Bits 2 to 15 - Buffer0_Size.This field is set by the host and
1369 * eventually overwritten by the adapter. The host writes the
1370 * available buffer size in bytes when it passes the descriptor to
1371 * the adapter. When a frame is delivered the host, the adapter
1372 * populates this field with the number of bytes written into the
1373 * buffer. The largest supported buffer is 16, 383 bytes.
1374 * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
1375 * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
1376 * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
1377 * of the VLAN tag, if one was detected by the adapter. This field is
1378 * populated even if VLAN-tag stripping is enabled.
1379 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
1380 *
1381 * One buffer mode RxD for ring structure
1382 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)			vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)	vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)	vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)	vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)	vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val)	vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK	vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)	vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};
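
/*
 * Example (illustrative only): on completion, the fields documented
 * above are extracted from control_0/control_1 with the GET accessors.
 * "rxdp" is a hypothetical pointer to a completed RxD, and
 * VXGE_HW_RING_T_CODE_OK (0x0) is assumed from the transfer-code enum
 * defined with the other ring transfer codes.
 *
 *	u8 t_code  = VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
 *	u32 proto  = VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
 *	u32 vlan   = VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
 *
 *	if (t_code != VXGE_HW_RING_T_CODE_OK)
 *		(handle the error per the transfer-code table above)
 */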

enum vxge_hw_rth_algoritms {
	RTH_ALG_JENKINS = 0,
	RTH_ALG_MS_RSS = 1,
	RTH_ALG_CRC32C = 2
};
1439
1440/**
1441 * struct vxge_hw_rth_hash_types - RTH hash types.
1442 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
1443 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
1444 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
1445 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
1446 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
1447 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
1448 *
1449 * Used to pass RTH hash types to rts_rts_set.
1450 *
1451 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1452 */
1453struct vxge_hw_rth_hash_types {
Jon Mason47f01db2010-11-11 04:25:53 +00001454 u8 hash_type_tcpipv4_en:1,
1455 hash_type_ipv4_en:1,
1456 hash_type_tcpipv6_en:1,
1457 hash_type_ipv6_en:1,
1458 hash_type_tcpipv6ex_en:1,
1459 hash_type_ipv6ex_en:1;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001460};
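
/*
 * Illustrative sketch: steering TCP/IPv4 flows with the Jenkins hash.
 * "vp" (an open vpath handle) and the bucket size of 8 are placeholders;
 * vxge_hw_vpath_rts_rth_set() is declared later in this file.
 *
 *	struct vxge_hw_rth_hash_types hash_types = {0};
 *	enum vxge_hw_status status;
 *
 *	hash_types.hash_type_tcpipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
 *					   &hash_types, 8);
 */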

void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

/**
 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the size of an RxD for the given buffer mode
 */
static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

/**
 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the number of RxDs per RxD block for the given
 * buffer mode
 */
static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE-16) /
		sizeof(struct vxge_hw_ring_rxd_1));
}

/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should be already mapped to the device
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function does not return a value and always
 * succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_set(
	void *rxdh,
	dma_addr_t dma_pointer,
	u32 size)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
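
/*
 * Usage sketch (illustrative; the skb plumbing and "buf_size" are
 * placeholders): a ring's rxd_init callback would typically map a
 * receive buffer and attach it to the descriptor before it is posted
 * via vxge_hw_ring_rxd_post().
 *
 *	struct sk_buff *skb = netdev_alloc_skb(ndev, buf_size);
 *	dma_addr_t dma_addr = pci_map_single(pdev, skb->data, buf_size,
 *					     PCI_DMA_FROMDEVICE);
 *
 *	vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 */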

/**
 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @pkt_length: Length (in bytes) of the data in the buffer pointed to
 * by the descriptor. Returned by HW.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the "out" parameters. The function always succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	u32 *pkt_length)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	*pkt_length =
		(u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @rxd_info: Descriptor information
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 */
static inline
void vxge_hw_ring_rxd_1b_info_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	struct vxge_hw_ring_rxd_info *rxd_info)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	rxd_info->syn_flag =
		(u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
	rxd_info->is_icmp =
		(u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
	rxd_info->fast_path_eligible =
		(u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
	rxd_info->l3_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l3_cksum =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
	rxd_info->l4_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l4_cksum =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
	rxd_info->frame =
		(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
	rxd_info->proto =
		(u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
	rxd_info->is_vlan =
		(u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
	rxd_info->vlan =
		(u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
	rxd_info->rth_bucket =
		(u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
	rxd_info->rth_it_hit =
		(u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
	rxd_info->rth_spdm_hit =
		(u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
	rxd_info->rth_hash_type =
		(u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
	rxd_info->rth_value =
		(u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
 * of a 1b mode ring.
 * @rxdh: Descriptor handle.
 *
 * Returns: private driver info associated with the descriptor.
 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
 */
static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	return (void *)(size_t)rxdp->host_control;
}
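
/*
 * Putting the three ring helpers above together (illustrative;
 * "struct my_rx_priv" and its skb member are hypothetical): a ring
 * completion callback would typically run something like:
 *
 *	u32 pkt_length;
 *	struct vxge_hw_ring_rxd_info ext_info;
 *	struct my_rx_priv *rx_priv = vxge_hw_ring_rxd_private_get(rxdh);
 *
 *	vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
 *	vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &ext_info);
 *
 *	skb_put(rx_priv->skb, pkt_length);
 *	if (ext_info.l3_cksum_valid && ext_info.l4_cksum_valid)
 *		rx_priv->skb->ip_summed = CHECKSUM_UNNECESSARY;
 */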

/**
 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
 * @txdlh: Descriptor handle.
 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
 * and/or TCP and/or UDP.
 *
 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
 * descriptor.
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_buffer_set().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
	txdp->control_1 |= cksum_bits;
}

/**
 * vxge_hw_fifo_txdl_mss_set - Set MSS.
 * @txdlh: Descriptor handle.
 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
 * driver, which in turn inserts the MSS into the @txdlh.
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_cksum_set_bits().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
}

/**
 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
 * @txdlh: Descriptor handle.
 * @vlan_tag: 16bit VLAN tag.
 *
 * Insert VLAN tag into specified transmit descriptor.
 * The actual insertion of the tag into outgoing frame is done by the hardware.
 */
static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
}
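
/*
 * Illustrative TxD preparation sketch combining the three setters above.
 * It assumes the VXGE_HW_FIFO_TXD_TX_CKO_* checksum-enable bits defined
 * with the TxD layout earlier in this file, the skb helpers of this
 * kernel era, and a vxge_hw_fifo_txdl_post() declared elsewhere in the
 * driver.
 *
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 *	if (skb_is_gso(skb))
 *		vxge_hw_fifo_txdl_mss_set(txdlh, skb_shinfo(skb)->gso_size);
 *	if (vlan_tx_tag_present(skb))
 *		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tx_tag_get(skb));
 */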

/**
 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
 * @txdlh: Descriptor handle.
 *
 * Retrieve per-descriptor private data.
 * Note that the driver requests per-descriptor space via
 * struct vxge_hw_fifo_attr passed to vxge_hw_vpath_open().
 *
 * Returns: private driver data associated with the descriptor.
 */
static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	return (void *)(size_t)txdp->host_control;
}

/**
 * struct vxge_hw_ring_attr - Ring open "template".
 * @callback: Ring completion callback. HW invokes the callback when there
 * are new completions on that ring. In many implementations
 * the @callback executes in the hw interrupt context.
 * @rxd_init: Ring's descriptor-initialize callback.
 * See vxge_hw_ring_rxd_init_f{}.
 * If not NULL, HW invokes the callback when opening
 * the ring.
 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding ring.
 * See also vxge_hw_ring_rxd_term_f{}.
 * @userdata: User-defined "context" of _that_ ring. Passed back to the
 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
 * @per_rxd_space: If specified (i.e., greater than zero): extra space
 * reserved by HW per each receive descriptor. Can be used to store,
 * and retrieve on completion, information specific to the driver.
 *
 * Ring open "template". User fills the structure with ring
 * attributes and passes it to vxge_hw_vpath_open().
 */
struct vxge_hw_ring_attr {
	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	void *userdata;
	u32 per_rxd_space;
};

/**
 * function vxge_hw_fifo_callback_f - FIFO callback.
 * @fifo_handle: Fifo (of a virtual path) containing one or more completed
 * descriptors.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to per txdl space allocated
 * @t_code: Transfer code, as per Titan User Guide.
 * Returned by HW.
 * @host_control: Opaque 64bit data stored by driver inside the Titan
 * descriptor prior to posting the latter on the fifo
 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
 * as is to the driver with each completed descriptor.
 * @userdata: Opaque per-fifo data specified at fifo open
 * time, via vxge_hw_vpath_open().
 *
 * Fifo completion callback (type declaration). A single per-fifo
 * callback is specified at fifo open time, via
 * vxge_hw_vpath_open(). Typically gets called as part of the processing
 * of the Interrupt Service Routine.
 *
 * Fifo callback gets called by HW if, and only if, there is at least
 * one new completion on a given fifo. Upon processing the first @txdlh
 * the driver is _supposed_ to continue consuming completions using:
 *    - vxge_hw_fifo_txdl_next_completed()
 *
 * Note that failure to process new completions in a timely fashion
 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
 *
 * Non-zero @t_code means failure to process transmit descriptor.
 *
 * In the "transmit" case the failure could happen, for instance, when the
 * link is down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
 */
/**
 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to per txdl space allocated
 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
 * @userdata: Per-fifo user data (a.k.a. context) specified at
 * fifo open time, via vxge_hw_vpath_open().
 *
 * Terminate descriptor callback. Unless NULL is specified in the
 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
 * HW invokes the callback as part of closing fifo, prior to
 * de-allocating the ring and associated data structures
 * (including descriptors).
 * The driver should utilize the callback to (for instance) unmap
 * and free DMA data buffers associated with the posted (state =
 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
 * as well as other relevant cleanup functions.
 *
 * See also: struct vxge_hw_fifo_attr{}
 */
/**
 * struct vxge_hw_fifo_attr - Fifo open "template".
 * @callback: Fifo completion callback. HW invokes the callback when there
 * are new completions on that fifo. In many implementations
 * the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding fifo.
 * See also vxge_hw_fifo_txdl_term_f{}.
 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
 * user as one of the @callback, and @txdl_term arguments.
 * @per_txdl_space: If specified (i.e., greater than zero): extra space
 * reserved by HW per each transmit descriptor. Can be used to
 * store, and retrieve on completion, information specific
 * to the driver.
 *
 * Fifo open "template". User fills the structure with fifo
 * attributes and passes it to vxge_hw_vpath_open().
 */
struct vxge_hw_fifo_attr {

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb, int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	void *userdata;
	u32 per_txdl_space;
};
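
/*
 * Skeleton of a fifo completion callback matching the template above
 * (illustrative; the buffer-reclaim logic is elided, and the
 * vxge_hw_fifo_txdl_next_completed() prototype is assumed from
 * elsewhere in the driver). Per the vxge_hw_fifo_callback_f notes, it
 * drains all outstanding completions before returning.
 *
 *	static enum vxge_hw_status
 *	my_tx_compl(struct __vxge_hw_fifo *fifo, void *txdlh,
 *		    enum vxge_hw_fifo_tcode t_code, void *userdata,
 *		    struct sk_buff ***skb_ptr, int nr_skb, int *more)
 *	{
 *		do {
 *			(unmap and free the buffers behind txdlh,
 *			 checking t_code for per-descriptor errors)
 *		} while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh,
 *					&t_code) == VXGE_HW_OK);
 *		return VXGE_HW_OK;
 *	}
 */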

/**
 * struct vxge_hw_vpath_attr - Attributes of virtual path
 * @vp_id: Identifier of Virtual Path
 * @ring_attr: Attributes of ring for non-offload receive
 * @fifo_attr: Attributes of fifo for non-offload transmit
 *
 * Attributes of virtual path. This structure is passed as a parameter
 * to the vxge_hw_vpath_open() routine to set the attributes of the ring
 * and fifo.
 */
struct vxge_hw_vpath_attr {
	u32				vp_id;
	struct vxge_hw_ring_attr	ring_attr;
	struct vxge_hw_fifo_attr	fifo_attr;
};
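
/*
 * Illustrative open sequence: zero the template, plug in the ring and
 * fifo callbacks, and open the vpath. All my_* names and
 * "struct my_rx_priv" are placeholders.
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp = NULL;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = vp_id;
 *	attr.ring_attr.callback = my_rx_compl;
 *	attr.ring_attr.rxd_init = my_rxd_init;
 *	attr.ring_attr.rxd_term = my_rxd_term;
 *	attr.ring_attr.per_rxd_space = sizeof(struct my_rx_priv);
 *	attr.fifo_attr.callback = my_tx_compl;
 *	attr.fifo_attr.txdl_term = my_txdl_term;
 *
 *	if (vxge_hw_vpath_open(devh, &attr, &vp) != VXGE_HW_OK)
 *		(bail out)
 */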

enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
	struct vxge_hw_device_config *device_config);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
	struct __vxge_hw_device *devh)
{
	return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);

enum vxge_hw_status __devinit vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config);

enum vxge_hw_status vxge_hw_device_getpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 *tx,
	u32 *rx);

enum vxge_hw_status vxge_hw_device_setpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 tx,
	u32 rx);

static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
			unsigned long size,
			struct pci_dev **p_dmah,
			struct pci_dev **p_dma_acch)
{
	gfp_t flags;
	void *vaddr;
	unsigned long misaligned = 0;
	int realloc_flag = 0;
	*p_dma_acch = *p_dmah = NULL;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;
realloc:
	vaddr = kmalloc(size, flags);
	if (vaddr == NULL)
		return vaddr;
	misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
				VXGE_CACHE_LINE_SIZE);
	if (realloc_flag)
		goto out;

	if (misaligned) {
		/* misaligned, free current one and try allocating
		 * size + VXGE_CACHE_LINE_SIZE memory
		 */
		kfree(vaddr);
		size += VXGE_CACHE_LINE_SIZE;
		realloc_flag = 1;
		goto realloc;
	}
out:
	/* stash the realignment offset in the "access handle" so that
	 * vxge_os_dma_free() can recover the original kmalloc pointer
	 */
	*(unsigned long *)p_dma_acch = misaligned;
	vaddr = (void *)((u8 *)vaddr + misaligned);
	return vaddr;
}

static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
			struct pci_dev **p_dma_acch)
{
	unsigned long misaligned = *(unsigned long *)p_dma_acch;
	u8 *tmp = (u8 *)vaddr;
	tmp -= misaligned;
	kfree((void *)tmp);
}
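
/*
 * Illustrative pairing of the two helpers above: the "access handle"
 * written through p_dma_acch holds the realignment offset, so the same
 * handle must be passed back unchanged on free.
 *
 *	struct pci_dev *dmah = NULL, *dma_acch = NULL;
 *	void *block = vxge_os_dma_malloc(pdev, VXGE_HW_BLOCK_SIZE,
 *					 &dmah, &dma_acch);
 *	if (block != NULL) {
 *		(use the cache-line-aligned block)
 *		vxge_os_dma_free(pdev, block, &dma_acch);
 *	}
 */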

/*
 * __vxge_hw_mempool_item_priv - returns a pointer to the per-item
 * private space
 */
static inline void*
__vxge_hw_mempool_item_priv(
	struct vxge_hw_mempool *mempool,
	u32 memblock_idx,
	void *item,
	u32 *memblock_item_idx)
{
	ptrdiff_t offset;
	void *memblock = mempool->memblocks_arr[memblock_idx];

	offset = (u32)((u8 *)item - (u8 *)memblock);
	vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);

	(*memblock_item_idx) = (u32) offset / mempool->item_size;
	vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);

	return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
				(*memblock_item_idx) * mempool->items_priv_size;
}

/*
 * __vxge_hw_fifo_txdl_priv - Return the HW-private txdl data for a TxD.
 * @fifo: Fifo
 * @txdp: Pointer to a TxD
 *
 * The HW-private area is located immediately past the driver's
 * per-txdl space for the given descriptor.
 */
static inline struct __vxge_hw_fifo_txdl_priv *
__vxge_hw_fifo_txdl_priv(
	struct __vxge_hw_fifo *fifo,
	struct vxge_hw_fifo_txd *txdp)
{
	return (struct __vxge_hw_fifo_txdl_priv *)
			(((char *)((ulong)txdp->host_control)) +
				fifo->per_txdl_space);
}

enum vxge_hw_status vxge_hw_vpath_open(
	struct __vxge_hw_device *devh,
	struct vxge_hw_vpath_attr *attr,
	struct __vxge_hw_vpath_handle **vpath_handle);

enum vxge_hw_status vxge_hw_vpath_close(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);

enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);

enum vxge_hw_status vxge_hw_vpath_mtu_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);

#ifndef readq
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;
	ret = readl(addr + 4);	/* upper 32 bits live at offset 4 */
	ret <<= 32;
	ret |= readl(addr);	/* lower 32 bits live at offset 0 */

	return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
#endif

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

/**
 * vxge_debug_ll
 * @level: level of debug verbosity.
 * @mask: mask for the debug
 * @fmt: printf like format string
 *
 * Provides logging facilities. Can be customized on per-module
 * basis and/or with debug levels. Input parameters, except
 * module and level, are the same as posix printf. This function
 * may be compiled out if DEBUG macro was never defined.
 * See also: enum vxge_debug_level{}.
 */
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
#define vxge_debug_ll(level, mask, fmt, ...) do {			       \
	if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
	    (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
		if ((mask & VXGE_DEBUG_MASK) == mask)			       \
			printk(fmt "\n", __VA_ARGS__);			       \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
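
/*
 * Usage sketch: both the severity check and the module-mask check must
 * pass before anything is printed. Note that the macro, as written,
 * requires at least one argument after the format string.
 *
 *	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK,
 *		      "%s: vpath %d failed to reset", __func__, vp_id);
 */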

enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
	struct __vxge_hw_vpath_handle **vpath_handles,
	u32 vpath_count,
	u8 *mtable,
	u8 *itable,
	u32 itable_size);

enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	enum vxge_hw_rth_algoritms algorithm,
	struct vxge_hw_rth_hash_types *hash_type,
	u16 bucket_size);

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);

#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
#define VXGE_HW_MAX_POLLING_COUNT 100

void
vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build);

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
		     int size);

enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *eprom_image_data);

int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif