/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

static enum vxge_hw_status
__vxge_hw_fifo_create(
	struct __vxge_hw_vpath_handle *vpath_handle,
	struct vxge_hw_fifo_attr *attr);

29static enum vxge_hw_status
30__vxge_hw_fifo_abort(
31 struct __vxge_hw_fifo *fifoh);
32
33static enum vxge_hw_status
34__vxge_hw_fifo_reset(
35 struct __vxge_hw_fifo *ringh);
36
37static enum vxge_hw_status
38__vxge_hw_fifo_delete(
39 struct __vxge_hw_vpath_handle *vpath_handle);
40
41static struct __vxge_hw_blockpool_entry *
42__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
43 u32 size);
44
45static void
46__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
47 struct __vxge_hw_blockpool_entry *entry);
48
49static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
50 void *block_addr,
51 u32 length,
52 struct pci_dev *dma_h,
53 struct pci_dev *acc_handle);
54
55static enum vxge_hw_status
56__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
57 struct __vxge_hw_blockpool *blockpool,
58 u32 pool_size,
59 u32 pool_max);
60
61static void
62__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
63
64static void *
65__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
66 u32 size,
67 struct vxge_hw_mempool_dma *dma_object);
68
69static void
70__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
71 void *memblock,
72 u32 size,
73 struct vxge_hw_mempool_dma *dma_object);
74
75
76static struct __vxge_hw_channel*
77__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
78 enum __vxge_hw_channel_type type, u32 length,
79 u32 per_dtr_space, void *userdata);
80
81static void
82__vxge_hw_channel_free(
83 struct __vxge_hw_channel *channel);
84
85static enum vxge_hw_status
86__vxge_hw_channel_initialize(
87 struct __vxge_hw_channel *channel);
88
89static enum vxge_hw_status
90__vxge_hw_channel_reset(
91 struct __vxge_hw_channel *channel);
92
93static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
94
95static enum vxge_hw_status
96__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
97
98static enum vxge_hw_status
99__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
100
101static void
102__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
103
104static void
105__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
106
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

110static void
111__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
112
113static enum vxge_hw_status
114__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
115
116static enum vxge_hw_status
117__vxge_hw_device_register_poll(
118 void __iomem *reg,
119 u64 mask, u32 max_millis);
120
121static inline enum vxge_hw_status
122__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
123 u64 mask, u32 max_millis)
124{
125 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
126 wmb();
127
128 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
129 wmb();
130
131 return __vxge_hw_device_register_poll(addr, mask, max_millis);
132}
133
134static struct vxge_hw_mempool*
135__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
136 u32 item_size, u32 private_size, u32 items_initial,
137 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
138 void *userdata);
139static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
140
141static enum vxge_hw_status
142__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
143 struct vxge_hw_vpath_stats_hw_info *hw_stats);
144
145static enum vxge_hw_status
146vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
147
148static enum vxge_hw_status
149__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
150
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

154
155static enum vxge_hw_status
156__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
157
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);

161static void
162__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
163
164static enum vxge_hw_status
165__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
166 u32 operation, u32 offset, u64 *stat);
167
168static enum vxge_hw_status
169__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
170 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
171
172static enum vxge_hw_status
173__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
174 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
175
static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
	u64 val64;

	val64 = readq(&vp_reg->rxmac_vcfg0);
	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	writeq(val64, &vp_reg->rxmac_vcfg0);
	val64 = readq(&vp_reg->rxmac_vcfg0);

	return;
}

189/*
190 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
191 */
192int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
193{
194 struct vxge_hw_vpath_reg __iomem *vp_reg;
195 struct __vxge_hw_virtualpath *vpath;
196 u64 val64, rxd_count, rxd_spat;
197 int count = 0, total_count = 0;
198
199 vpath = &hldev->virtual_paths[vp_id];
200 vp_reg = vpath->vp_reg;
201
202 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
203
204 /* Check that the ring controller for this vpath has enough free RxDs
205 * to send frames to the host. This is done by reading the
206 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
207 * RXD_SPAT value for the vpath.
208 */
209 val64 = readq(&vp_reg->prc_cfg6);
210 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
	 * headroom.
	 */
214 rxd_spat *= 2;
215
216 do {
217 mdelay(1);
218
219 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
220
221 /* Check that the ring controller for this vpath does
222 * not have any frame in its pipeline.
223 */
224 val64 = readq(&vp_reg->frm_in_progress_cnt);
225 if ((rxd_count <= rxd_spat) || (val64 > 0))
226 count = 0;
227 else
228 count++;
229 total_count++;
230 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
231 (total_count < VXGE_HW_MAX_POLLING_COUNT));
232
233 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
234 printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
235 __func__);
236
237 return total_count;
238}
239
240/* vxge_hw_device_wait_receive_idle - This function waits until all frames
241 * stored in the frame buffer for each vpath assigned to the given
242 * function (hldev) have been sent to the host.
243 */
244void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
245{
246 int i, total_count = 0;
247
248 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
249 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
250 continue;
251
252 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
253 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
254 break;
255 }
256}
257
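/* Usage sketch (illustrative only): a caller that needs the receive path
 * quiet, e.g. before a reset or reconfiguration, might do:
 *
 *	vxge_hw_device_wait_receive_idle(hldev);
 *	... issue the reset / reconfiguration ...
 *
 * or, for a single virtual path:
 *
 *	vxge_hw_vpath_wait_receive_idle(hldev, vp_id);
 */
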
static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
		     u64 *steer_ctrl)
{
263 struct vxge_hw_vpath_reg __iomem *vp_reg;
264 enum vxge_hw_status status;
265 u64 val64;
266 u32 retry = 0, max_retry = 100;
267
268 vp_reg = vpath->vp_reg;
269
270 if (vpath->vp_open) {
271 max_retry = 3;
272 spin_lock(&vpath->lock);
273 }
274
275 writeq(*data0, &vp_reg->rts_access_steer_data0);
276 writeq(*data1, &vp_reg->rts_access_steer_data1);
277 wmb();
278
279 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
280 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
281 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
282 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
283 *steer_ctrl;
284
285 status = __vxge_hw_pio_mem_write64(val64,
286 &vp_reg->rts_access_steer_ctrl,
287 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
288 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
289
	/* The __vxge_hw_device_register_poll can udelay for a significant
	 * amount of time, blocking other processes from the CPU.  If it
	 * delays for ~5 seconds, an NMI error can occur.  A way around this
	 * is to give up the processor via msleep, but this is not allowed
	 * while holding a lock.  So, only allow it to sleep for ~4 seconds
	 * if open.  Otherwise, delay for 1 second and sleep for 10 ms until
	 * the firmware operation has completed or timed out.
	 */
298 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
299 if (!vpath->vp_open)
300 msleep(20);
301 status = __vxge_hw_device_register_poll(
302 &vp_reg->rts_access_steer_ctrl,
303 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
304 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
305 }
306
307 if (status != VXGE_HW_OK)
308 goto out;
309
310 val64 = readq(&vp_reg->rts_access_steer_ctrl);
311 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
312 *data0 = readq(&vp_reg->rts_access_steer_data0);
313 *data1 = readq(&vp_reg->rts_access_steer_data1);
314 *steer_ctrl = val64;
315 } else
316 status = VXGE_HW_FAIL;
317
318out:
319 if (vpath->vp_open)
320 spin_unlock(&vpath->lock);
321 return status;
322}
323
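/* Calling-convention sketch (illustrative): callers pass an RTS action and a
 * data-structure selector together with in/out data words, e.g. the firmware
 * memo read performed by __vxge_hw_vpath_fw_ver_get() below:
 *
 *	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 *
 *	status = vxge_hw_vpath_fw_api(vpath,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
 *			0, &data0, &data1, &steer_ctrl);
 */
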
enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build)
{
328 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
329 struct __vxge_hw_virtualpath *vpath;
330 enum vxge_hw_status status;
331
332 vpath = &hldev->virtual_paths[hldev->first_vp_id];
333
334 status = vxge_hw_vpath_fw_api(vpath,
335 VXGE_HW_FW_UPGRADE_ACTION,
336 VXGE_HW_FW_UPGRADE_MEMO,
337 VXGE_HW_FW_UPGRADE_OFFSET_READ,
338 &data0, &data1, &steer_ctrl);
339 if (status != VXGE_HW_OK)
340 return status;
341
342 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
343 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
344 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
345
346 return status;
347}
348
349enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
350{
351 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
352 struct __vxge_hw_virtualpath *vpath;
353 enum vxge_hw_status status;
354 u32 ret;
355
356 vpath = &hldev->virtual_paths[hldev->first_vp_id];
357
358 status = vxge_hw_vpath_fw_api(vpath,
359 VXGE_HW_FW_UPGRADE_ACTION,
360 VXGE_HW_FW_UPGRADE_MEMO,
361 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
362 &data0, &data1, &steer_ctrl);
363 if (status != VXGE_HW_OK) {
364 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
365 goto exit;
366 }
367
368 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
369 if (ret != 1) {
370 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
371 __func__, ret);
372 status = VXGE_HW_FAIL;
373 }
374
375exit:
376 return status;
377}
378
379enum vxge_hw_status
380vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
381{
382 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
383 struct __vxge_hw_virtualpath *vpath;
384 enum vxge_hw_status status;
385 int ret_code, sec_code;
386
387 vpath = &hldev->virtual_paths[hldev->first_vp_id];
388
389 /* send upgrade start command */
390 status = vxge_hw_vpath_fw_api(vpath,
391 VXGE_HW_FW_UPGRADE_ACTION,
392 VXGE_HW_FW_UPGRADE_MEMO,
393 VXGE_HW_FW_UPGRADE_OFFSET_START,
394 &data0, &data1, &steer_ctrl);
395 if (status != VXGE_HW_OK) {
396 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
397 __func__);
398 return status;
399 }
400
401 /* Transfer fw image to adapter 16 bytes at a time */
402 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
403 steer_ctrl = 0;
404
405 /* The next 128bits of fwdata to be loaded onto the adapter */
406 data0 = *((u64 *)fwdata);
407 data1 = *((u64 *)fwdata + 1);
408
409 status = vxge_hw_vpath_fw_api(vpath,
410 VXGE_HW_FW_UPGRADE_ACTION,
411 VXGE_HW_FW_UPGRADE_MEMO,
412 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
413 &data0, &data1, &steer_ctrl);
414 if (status != VXGE_HW_OK) {
415 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
416 __func__);
417 goto out;
418 }
419
420 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
421 switch (ret_code) {
422 case VXGE_HW_FW_UPGRADE_OK:
423 /* All OK, send next 16 bytes. */
424 break;
425 case VXGE_FW_UPGRADE_BYTES2SKIP:
426 /* skip bytes in the stream */
427 fwdata += (data0 >> 8) & 0xFFFFFFFF;
428 break;
429 case VXGE_HW_FW_UPGRADE_DONE:
430 goto out;
431 case VXGE_HW_FW_UPGRADE_ERR:
432 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
433 switch (sec_code) {
434 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
435 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
436 printk(KERN_ERR
437 "corrupted data from .ncf file\n");
438 break;
439 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
440 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
441 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
442 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
443 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
444 printk(KERN_ERR "invalid .ncf file\n");
445 break;
446 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
447 printk(KERN_ERR "buffer overflow\n");
448 break;
449 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
450 printk(KERN_ERR "failed to flash the image\n");
451 break;
452 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
453 printk(KERN_ERR
454 "generic error. Unknown error type\n");
455 break;
456 default:
457 printk(KERN_ERR "Unknown error of type %d\n",
458 sec_code);
459 break;
460 }
461 status = VXGE_HW_FAIL;
462 goto out;
463 default:
464 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
465 status = VXGE_HW_FAIL;
466 goto out;
467 }
468 /* point to next 16 bytes */
469 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
470 }
471out:
472 return status;
473}
474
475enum vxge_hw_status
476vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
477 struct eprom_image *img)
478{
479 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
480 struct __vxge_hw_virtualpath *vpath;
481 enum vxge_hw_status status;
482 int i;
483
484 vpath = &hldev->virtual_paths[hldev->first_vp_id];
485
486 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
487 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
488 data1 = steer_ctrl = 0;
489
490 status = vxge_hw_vpath_fw_api(vpath,
491 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
492 VXGE_HW_FW_API_GET_EPROM_REV,
493 0, &data0, &data1, &steer_ctrl);
494 if (status != VXGE_HW_OK)
495 break;
496
497 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
498 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
499 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
500 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
501 }
502
503 return status;
504}
505
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
511struct __vxge_hw_channel*
512__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
513 enum __vxge_hw_channel_type type,
514 u32 length, u32 per_dtr_space, void *userdata)
515{
516 struct __vxge_hw_channel *channel;
517 struct __vxge_hw_device *hldev;
518 int size = 0;
519 u32 vp_id;
520
521 hldev = vph->vpath->hldev;
522 vp_id = vph->vpath->vp_id;
523
524 switch (type) {
525 case VXGE_HW_CHANNEL_TYPE_FIFO:
526 size = sizeof(struct __vxge_hw_fifo);
527 break;
528 case VXGE_HW_CHANNEL_TYPE_RING:
529 size = sizeof(struct __vxge_hw_ring);
530 break;
531 default:
532 break;
533 }
534
535 channel = kzalloc(size, GFP_KERNEL);
536 if (channel == NULL)
537 goto exit0;
538 INIT_LIST_HEAD(&channel->item);
539
540 channel->common_reg = hldev->common_reg;
541 channel->first_vp_id = hldev->first_vp_id;
542 channel->type = type;
543 channel->devh = hldev;
544 channel->vph = vph;
545 channel->userdata = userdata;
546 channel->per_dtr_space = per_dtr_space;
547 channel->length = length;
548 channel->vp_id = vp_id;
549
550 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
551 if (channel->work_arr == NULL)
552 goto exit1;
553
554 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
555 if (channel->free_arr == NULL)
556 goto exit1;
557 channel->free_ptr = length;
558
559 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
560 if (channel->reserve_arr == NULL)
561 goto exit1;
562 channel->reserve_ptr = length;
563 channel->reserve_top = 0;
564
565 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
566 if (channel->orig_arr == NULL)
567 goto exit1;
568
569 return channel;
570exit1:
571 __vxge_hw_channel_free(channel);
572
573exit0:
574 return NULL;
575}
576
577/*
578 * __vxge_hw_channel_free - Free memory allocated for channel
579 * This function deallocates memory from the channel and various arrays
580 * in the channel
581 */
582void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
583{
584 kfree(channel->work_arr);
585 kfree(channel->free_arr);
586 kfree(channel->reserve_arr);
587 kfree(channel->orig_arr);
588 kfree(channel);
589}
590
591/*
592 * __vxge_hw_channel_initialize - Initialize a channel
593 * This function initializes a channel by properly setting the
594 * various references
595 */
596enum vxge_hw_status
597__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
598{
599 u32 i;
600 struct __vxge_hw_virtualpath *vpath;
601
602 vpath = channel->vph->vpath;
603
604 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
605 for (i = 0; i < channel->length; i++)
606 channel->orig_arr[i] = channel->reserve_arr[i];
607 }
608
609 switch (channel->type) {
610 case VXGE_HW_CHANNEL_TYPE_FIFO:
611 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
612 channel->stats = &((struct __vxge_hw_fifo *)
613 channel)->stats->common_stats;
614 break;
615 case VXGE_HW_CHANNEL_TYPE_RING:
616 vpath->ringh = (struct __vxge_hw_ring *)channel;
617 channel->stats = &((struct __vxge_hw_ring *)
618 channel)->stats->common_stats;
619 break;
620 default:
621 break;
622 }
623
624 return VXGE_HW_OK;
625}
626
627/*
628 * __vxge_hw_channel_reset - Resets a channel
629 * This function resets a channel by properly setting the various references
630 */
631enum vxge_hw_status
632__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
633{
634 u32 i;
635
636 for (i = 0; i < channel->length; i++) {
637 if (channel->reserve_arr != NULL)
638 channel->reserve_arr[i] = channel->orig_arr[i];
639 if (channel->free_arr != NULL)
640 channel->free_arr[i] = NULL;
641 if (channel->work_arr != NULL)
642 channel->work_arr[i] = NULL;
643 }
644 channel->free_ptr = channel->length;
645 channel->reserve_ptr = channel->length;
646 channel->reserve_top = 0;
647 channel->post_index = 0;
648 channel->compl_index = 0;
649
650 return VXGE_HW_OK;
651}
652
653/*
654 * __vxge_hw_device_pci_e_init
655 * Initialize certain PCI/PCI-X configuration registers
656 * with recommended values. Save config space for future hw resets.
657 */
658void
659__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
660{
661 u16 cmd = 0;
662
	/* Set the PErr Response bit and SERR in PCI command register. */
664 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
665 cmd |= 0x140;
666 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
667
668 pci_save_state(hldev->pdev);
}
670
/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Will poll until the masked bit is cleared or the timeout expires.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
679 u64 val64;
680 u32 i = 0;
681 enum vxge_hw_status ret = VXGE_HW_FAIL;
682
683 udelay(10);
684
685 do {
686 val64 = readq(reg);
687 if (!(val64 & mask))
688 return VXGE_HW_OK;
689 udelay(100);
690 } while (++i <= 9);
691
692 i = 0;
693 do {
694 val64 = readq(reg);
695 if (!(val64 & mask))
696 return VXGE_HW_OK;
697 mdelay(1);
698 } while (++i <= max_millis);
699
700 return ret;
701}
702
/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks whether the vpath reset-in-progress register has
 * cleared to zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
710 enum vxge_hw_status status;
711 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
712 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
713 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
714 return status;
715}
716
717/*
718 * __vxge_hw_device_toc_get
719 * This routine sets the swapper and reads the toc pointer and returns the
720 * memory mapped address of the toc
721 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
725 u64 val64;
726 struct vxge_hw_toc_reg __iomem *toc = NULL;
727 enum vxge_hw_status status;
728
729 struct vxge_hw_legacy_reg __iomem *legacy_reg =
730 (struct vxge_hw_legacy_reg __iomem *)bar0;
731
732 status = __vxge_hw_legacy_swapper_set(legacy_reg);
733 if (status != VXGE_HW_OK)
734 goto exit;
735
736 val64 = readq(&legacy_reg->toc_first_pointer);
737 toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
738exit:
739 return toc;
740}
741
742/*
743 * __vxge_hw_device_reg_addr_get
744 * This routine sets the swapper and reads the toc pointer and initializes the
745 * register location pointers in the device object. It waits until the ric is
746 * completed initializing registers.
747 */
748enum vxge_hw_status
749__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
750{
751 u64 val64;
752 u32 i;
753 enum vxge_hw_status status = VXGE_HW_OK;
754
755 hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
756
757 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
758 if (hldev->toc_reg == NULL) {
759 status = VXGE_HW_FAIL;
760 goto exit;
761 }
762
763 val64 = readq(&hldev->toc_reg->toc_common_pointer);
764 hldev->common_reg =
765 (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
766
767 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
768 hldev->mrpcim_reg =
769 (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
770
771 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
772 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
773 hldev->srpcim_reg[i] =
774 (struct vxge_hw_srpcim_reg __iomem *)
775 (hldev->bar0 + val64);
776 }
777
778 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
779 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
780 hldev->vpmgmt_reg[i] =
781 (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
782 }
783
784 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
785 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
786 hldev->vpath_reg[i] =
787 (struct vxge_hw_vpath_reg __iomem *)
788 (hldev->bar0 + val64);
789 }
790
791 val64 = readq(&hldev->toc_reg->toc_kdfc);
792
793 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
794 case 0:
795 hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
796 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
797 break;
	default:
799 break;
800 }
801
802 status = __vxge_hw_device_vpath_reset_in_prog_check(
803 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
804exit:
805 return status;
806}
807
/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers into the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
815 u64 val64;
816
817 val64 = readq(&hldev->common_reg->titan_asic_id);
818 hldev->device_id =
819 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
820
821 hldev->major_revision =
822 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
823
824 hldev->minor_revision =
825 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}
827
828/*
829 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
830 * This routine returns the Access Rights of the driver
831 */
832static u32
833__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
834{
835 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
836
837 switch (host_type) {
838 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
844 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
845 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
846 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
847 break;
848 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
849 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
850 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
851 break;
852 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
853 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
854 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
855 break;
856 case VXGE_HW_SR_VH_FUNCTION0:
857 case VXGE_HW_VH_NORMAL_FUNCTION:
858 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
859 break;
860 }
861
862 return access_rights;
863}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
872 if (__vxge_hw_device_access_rights_get(host_type,
873 func_id) &
874 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
875 return VXGE_HW_OK;
876 else
877 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
878}
879
880/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
882 * Returns the function number of the vpath.
883 */
884static u32
885__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
886{
887 u64 val64;
888
889 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
890
891 return
892 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
893}
894
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
901 u64 val64;
902 u32 i;
903
904 val64 = readq(&hldev->common_reg->host_type_assignments);
905
906 hldev->host_type =
907 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
908
909 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
910
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

		hldev->first_vp_id = i;
		break;
	}
}
928
929/*
930 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
931 * link width and signalling rate.
932 */
933static enum vxge_hw_status
934__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
935{
936 int exp_cap;
937 u16 lnk;
938
939 /* Get the negotiated link width and speed from PCI config space */
940 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
941 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
942
943 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
944 return VXGE_HW_ERR_INVALID_PCI_INFO;
945
946 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
947 case PCIE_LNK_WIDTH_RESRV:
948 case PCIE_LNK_X1:
949 case PCIE_LNK_X2:
950 case PCIE_LNK_X4:
951 case PCIE_LNK_X8:
952 break;
953 default:
954 return VXGE_HW_ERR_INVALID_PCI_INFO;
955 }
956
957 return VXGE_HW_OK;
958}
959
/*
 * __vxge_hw_device_initialize
962 * Initialize Titan-V hardware.
963 */
964enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
965{
966 enum vxge_hw_status status = VXGE_HW_OK;
967
	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}

exit:
	return status;
}
979
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
984static enum vxge_hw_status
985__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
986 struct vxge_hw_device_hw_info *hw_info)
987{
988 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
989 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
990 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
991 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
992 u64 data0, data1 = 0, steer_ctrl = 0;
993 enum vxge_hw_status status;
994
995 status = vxge_hw_vpath_fw_api(vpath,
996 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
997 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
998 0, &data0, &data1, &steer_ctrl);
999 if (status != VXGE_HW_OK)
1000 goto exit;
1001
1002 fw_date->day =
1003 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
1004 fw_date->month =
1005 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
1006 fw_date->year =
1007 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
1008
1009 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
1010 fw_date->month, fw_date->day, fw_date->year);
1011
1012 fw_version->major =
1013 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
1014 fw_version->minor =
1015 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
1016 fw_version->build =
1017 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
1018
1019 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
1020 fw_version->major, fw_version->minor, fw_version->build);
1021
1022 flash_date->day =
1023 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
1024 flash_date->month =
1025 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
1026 flash_date->year =
1027 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
1028
1029 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
1030 flash_date->month, flash_date->day, flash_date->year);
1031
1032 flash_version->major =
1033 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
1034 flash_version->minor =
1035 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
1036 flash_version->build =
1037 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
1038
1039 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
1040 flash_version->major, flash_version->minor,
1041 flash_version->build);
1042
1043exit:
1044 return status;
1045}
1046
1047/*
1048 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
1049 * part number and product description.
1050 */
1051static enum vxge_hw_status
1052__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
1053 struct vxge_hw_device_hw_info *hw_info)
1054{
1055 enum vxge_hw_status status;
1056 u64 data0, data1 = 0, steer_ctrl = 0;
1057 u8 *serial_number = hw_info->serial_number;
1058 u8 *part_number = hw_info->part_number;
1059 u8 *product_desc = hw_info->product_desc;
1060 u32 i, j = 0;
1061
1062 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
1063
1064 status = vxge_hw_vpath_fw_api(vpath,
1065 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1066 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1067 0, &data0, &data1, &steer_ctrl);
1068 if (status != VXGE_HW_OK)
1069 return status;
1070
1071 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
1072 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
1073
1074 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
1075 data1 = steer_ctrl = 0;
1076
1077 status = vxge_hw_vpath_fw_api(vpath,
1078 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1079 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1080 0, &data0, &data1, &steer_ctrl);
1081 if (status != VXGE_HW_OK)
1082 return status;
1083
1084 ((u64 *)part_number)[0] = be64_to_cpu(data0);
1085 ((u64 *)part_number)[1] = be64_to_cpu(data1);
1086
1087 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
1088 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
1089 data0 = i;
1090 data1 = steer_ctrl = 0;
1091
1092 status = vxge_hw_vpath_fw_api(vpath,
1093 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1094 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1095 0, &data0, &data1, &steer_ctrl);
1096 if (status != VXGE_HW_OK)
1097 return status;
1098
1099 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
1100 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
1101 }
1102
1103 return status;
1104}
1105
1106/*
1107 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
1108 * Returns pci function mode
1109 */
1110static u64
1111__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath)
1112{
1113 u64 data0, data1 = 0, steer_ctrl = 0;
1114 enum vxge_hw_status status;
1115
1116 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE;
1117
1118 status = vxge_hw_vpath_fw_api(vpath,
1119 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1120 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1121 0, &data0, &data1, &steer_ctrl);
1122
1123 return data0;
1124}
1125
1126/*
1127 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
1128 * from MAC address table.
1129 */
1130static enum vxge_hw_status
1131__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
1132 u8 *macaddr, u8 *macaddr_mask)
1133{
1134 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1135 data0 = 0, data1 = 0, steer_ctrl = 0;
1136 enum vxge_hw_status status;
1137 int i;
1138
1139 do {
1140 status = vxge_hw_vpath_fw_api(vpath, action,
1141 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1142 0, &data0, &data1, &steer_ctrl);
1143 if (status != VXGE_HW_OK)
1144 goto exit;
1145
1146 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
1147 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
1148 data1);
1149
1150 for (i = ETH_ALEN; i > 0; i--) {
1151 macaddr[i - 1] = (u8) (data0 & 0xFF);
1152 data0 >>= 8;
1153
1154 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
1155 data1 >>= 8;
1156 }
1157
1158 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
1159 data0 = 0, data1 = 0, steer_ctrl = 0;
1160
1161 } while (!is_valid_ether_addr(macaddr));
1162exit:
1163 return status;
1164}
1165
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
1172enum vxge_hw_status __devinit
1173vxge_hw_device_hw_info_get(void __iomem *bar0,
1174 struct vxge_hw_device_hw_info *hw_info)
1175{
1176 u32 i;
1177 u64 val64;
1178 struct vxge_hw_toc_reg __iomem *toc;
1179 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
1180 struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;
	struct __vxge_hw_virtualpath vpath;

1185 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
1186
1187 toc = __vxge_hw_device_toc_get(bar0);
1188 if (toc == NULL) {
1189 status = VXGE_HW_ERR_CRITICAL;
1190 goto exit;
1191 }
1192
1193 val64 = readq(&toc->toc_common_pointer);
1194 common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
1195
1196 status = __vxge_hw_device_vpath_reset_in_prog_check(
1197 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
1198 if (status != VXGE_HW_OK)
1199 goto exit;
1200
1201 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
1202
1203 val64 = readq(&common_reg->host_type_assignments);
1204
1205 hw_info->host_type =
1206 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1207
1208 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1209
1210 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1211 continue;
1212
1213 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
1214
1215 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
1216 (bar0 + val64);
1217
		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
1220 hw_info->func_id) &
1221 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
1222
1223 val64 = readq(&toc->toc_mrpcim_pointer);
1224
1225 mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
1226 (bar0 + val64);
1227
1228 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
1229 wmb();
1230 }
1231
1232 val64 = readq(&toc->toc_vpath_pointer[i]);
1233
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		hw_info->function_mode =
			__vxge_hw_vpath_pci_func_mode_get(&vpath);

		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;
1248
1249 break;
1250 }
1251
1252 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status = __vxge_hw_vpath_addr_get(&vpath,
			hw_info->mac_addrs[i],
			hw_info->mac_addr_masks[i]);
1264 if (status != VXGE_HW_OK)
1265 goto exit;
1266 }
1267exit:
1268 return status;
1269}
1270
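/* Usage sketch (illustrative; the probe-time variables pdev/bar0 are assumed,
 * not defined here): this helper is meant to be called before the HW device
 * object exists, using only a mapped BAR0:
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *	void __iomem *bar0 = pci_ioremap_bar(pdev, 0);
 *
 *	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *	if (status != VXGE_HW_OK)
 *		goto err_unmap;
 */
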
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Note that all the arguments of this public API are 'IN', including @hldev.
 * The driver cooperates with the OS to find a new Titan device and to locate
 * its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
1280enum vxge_hw_status __devinit
1281vxge_hw_device_initialize(
1282 struct __vxge_hw_device **devh,
1283 struct vxge_hw_device_attr *attr,
1284 struct vxge_hw_device_config *device_config)
1285{
1286 u32 i;
1287 u32 nblocks = 0;
1288 struct __vxge_hw_device *hldev = NULL;
1289 enum vxge_hw_status status = VXGE_HW_OK;
1290
1291 status = __vxge_hw_device_config_check(device_config);
1292 if (status != VXGE_HW_OK)
1293 goto exit;
1294
1295 hldev = (struct __vxge_hw_device *)
1296 vmalloc(sizeof(struct __vxge_hw_device));
1297 if (hldev == NULL) {
1298 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1299 goto exit;
1300 }
1301
1302 memset(hldev, 0, sizeof(struct __vxge_hw_device));
1303 hldev->magic = VXGE_HW_DEVICE_MAGIC;
1304
1305 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1306
1307 /* apply config */
1308 memcpy(&hldev->config, device_config,
1309 sizeof(struct vxge_hw_device_config));
1310
1311 hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;
1313
1314 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
1315 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
1316 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
1317
1318 __vxge_hw_device_pci_e_init(hldev);
1319
1320 status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vfree(hldev);
		goto exit;
	}
	__vxge_hw_device_id_get(hldev);
1326
1327 __vxge_hw_device_host_info_get(hldev);
1328
1329 /* Incrementing for stats blocks */
1330 nblocks++;
1331
1332 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1334 continue;
1335
1336 if (device_config->vp_config[i].ring.enable ==
1337 VXGE_HW_RING_ENABLE)
1338 nblocks += device_config->vp_config[i].ring.ring_blocks;
1339
1340 if (device_config->vp_config[i].fifo.enable ==
1341 VXGE_HW_FIFO_ENABLE)
1342 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1343 nblocks++;
1344 }
1345
1346 if (__vxge_hw_blockpool_create(hldev,
1347 &hldev->block_pool,
1348 device_config->dma_blockpool_initial + nblocks,
1349 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1350
1351 vxge_hw_device_terminate(hldev);
1352 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1353 goto exit;
1354 }
1355
1356 status = __vxge_hw_device_initialize(hldev);
	if (status != VXGE_HW_OK) {
1358 vxge_hw_device_terminate(hldev);
1359 goto exit;
1360 }
1361
1362 *devh = hldev;
1363exit:
1364 return status;
1365}
1366
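/* Usage sketch (illustrative; attribute setup is abbreviated and the local
 * names are assumed): create the HW device object, and tear it down later
 * with vxge_hw_device_terminate():
 *
 *	struct __vxge_hw_device *hldev;
 *	struct vxge_hw_device_attr attr;
 *
 *	attr.bar0 = bar0;
 *	attr.pdev = pdev;
 *	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */
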
1367/*
1368 * vxge_hw_device_terminate - Terminate Titan device.
1369 * Terminate HW device.
1370 */
1371void
1372vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1373{
1374 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1375
1376 hldev->magic = VXGE_HW_DEVICE_DEAD;
1377 __vxge_hw_blockpool_destroy(&hldev->block_pool);
1378 vfree(hldev);
1379}
1380
1381/*
1382 * vxge_hw_device_stats_get - Get the device hw statistics.
1383 * Returns the vpath h/w stats for the device.
1384 */
1385enum vxge_hw_status
1386vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1387 struct vxge_hw_device_stats_hw_info *hw_stats)
1388{
1389 u32 i;
1390 enum vxge_hw_status status = VXGE_HW_OK;
1391
1392 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1394 (hldev->virtual_paths[i].vp_open ==
1395 VXGE_HW_VP_NOT_OPEN))
1396 continue;
1397
1398 memcpy(hldev->virtual_paths[i].hw_stats_sav,
1399 hldev->virtual_paths[i].hw_stats,
1400 sizeof(struct vxge_hw_vpath_stats_hw_info));
1401
1402 status = __vxge_hw_vpath_stats_get(
1403 &hldev->virtual_paths[i],
1404 hldev->virtual_paths[i].hw_stats);
1405 }
1406
1407 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1408 sizeof(struct vxge_hw_device_stats_hw_info));
1409
1410 return status;
1411}
1412
1413/*
1414 * vxge_hw_driver_stats_get - Get the device sw statistics.
1415 * Returns the vpath s/w stats for the device.
1416 */
1417enum vxge_hw_status vxge_hw_driver_stats_get(
1418 struct __vxge_hw_device *hldev,
1419 struct vxge_hw_device_stats_sw_info *sw_stats)
1420{
1421 enum vxge_hw_status status = VXGE_HW_OK;
1422
1423 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1424 sizeof(struct vxge_hw_device_stats_sw_info));
1425
1426 return status;
1427}
1428
1429/*
1430 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1431 * and offset and perform an operation
1432 * Get the statistics from the given location and offset.
1433 */
1434enum vxge_hw_status
1435vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1436 u32 operation, u32 location, u32 offset, u64 *stat)
1437{
1438 u64 val64;
1439 enum vxge_hw_status status = VXGE_HW_OK;
1440
	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
1444 goto exit;
1445
1446 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1447 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1448 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1449 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1450
1451 status = __vxge_hw_pio_mem_write64(val64,
1452 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1453 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1454 hldev->config.device_poll_millis);
1455
1456 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1457 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1458 else
1459 *stat = 0;
1460exit:
1461 return status;
1462}
1463
1464/*
1465 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1466 * Get the Statistics on aggregate port
1467 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
1471{
1472 u64 *val64;
1473 int i;
1474 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1475 enum vxge_hw_status status = VXGE_HW_OK;
1476
1477 val64 = (u64 *)aggr_stats;
1478
	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
1482 goto exit;
1483
1484 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1485 status = vxge_hw_mrpcim_stats_access(hldev,
1486 VXGE_HW_STATS_OP_READ,
1487 VXGE_HW_STATS_LOC_AGGR,
1488 ((offset + (104 * port)) >> 3), val64);
1489 if (status != VXGE_HW_OK)
1490 goto exit;
1491
1492 offset += 8;
1493 val64++;
1494 }
1495exit:
1496 return status;
1497}
1498
1499/*
1500 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1501 * Get the Statistics on port
1502 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
1506{
1507 u64 *val64;
1508 enum vxge_hw_status status = VXGE_HW_OK;
1509 int i;
1510 u32 offset = 0x0;
1511 val64 = (u64 *) port_stats;
1512
	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
1516 goto exit;
1517
1518 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1519 status = vxge_hw_mrpcim_stats_access(hldev,
1520 VXGE_HW_STATS_OP_READ,
1521 VXGE_HW_STATS_LOC_AGGR,
1522 ((offset + (608 * port)) >> 3), val64);
1523 if (status != VXGE_HW_OK)
1524 goto exit;
1525
1526 offset += 8;
1527 val64++;
1528 }
1529
1530exit:
1531 return status;
1532}
1533
1534/*
1535 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1536 * Get the XMAC Statistics
1537 */
1538enum vxge_hw_status
1539vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1540 struct vxge_hw_xmac_stats *xmac_stats)
1541{
1542 enum vxge_hw_status status = VXGE_HW_OK;
1543 u32 i;
1544
1545 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1546 0, &xmac_stats->aggr_stats[0]);
1547
1548 if (status != VXGE_HW_OK)
1549 goto exit;
1550
1551 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1552 1, &xmac_stats->aggr_stats[1]);
1553 if (status != VXGE_HW_OK)
1554 goto exit;
1555
1556 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1557
1558 status = vxge_hw_device_xmac_port_stats_get(hldev,
1559 i, &xmac_stats->port_stats[i]);
1560 if (status != VXGE_HW_OK)
1561 goto exit;
1562 }
1563
1564 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1565
1566 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1567 continue;
1568
1569 status = __vxge_hw_vpath_xmac_tx_stats_get(
1570 &hldev->virtual_paths[i],
1571 &xmac_stats->vpath_tx_stats[i]);
1572 if (status != VXGE_HW_OK)
1573 goto exit;
1574
1575 status = __vxge_hw_vpath_xmac_rx_stats_get(
1576 &hldev->virtual_paths[i],
1577 &xmac_stats->vpath_rx_stats[i]);
1578 if (status != VXGE_HW_OK)
1579 goto exit;
1580 }
1581exit:
1582 return status;
1583}
1584
1585/*
1586 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
1587 * This routine is used to dynamically change the debug output
1588 */
1589void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1590 enum vxge_debug_level level, u32 mask)
1591{
1592 if (hldev == NULL)
1593 return;
1594
1595#if defined(VXGE_DEBUG_TRACE_MASK) || \
1596 defined(VXGE_DEBUG_ERR_MASK)
1597 hldev->debug_module_mask = mask;
1598 hldev->debug_level = level;
1599#endif
1600
1601#if defined(VXGE_DEBUG_ERR_MASK)
1602 hldev->level_err = level & VXGE_ERR;
1603#endif
1604
1605#if defined(VXGE_DEBUG_TRACE_MASK)
1606 hldev->level_trace = level & VXGE_TRACE;
1607#endif
1608}
1609
1610/*
1611 * vxge_hw_device_error_level_get - Get the error level
1612 * This routine returns the current error level set
1613 */
1614u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1615{
1616#if defined(VXGE_DEBUG_ERR_MASK)
1617 if (hldev == NULL)
1618 return VXGE_ERR;
1619 else
1620 return hldev->level_err;
1621#else
1622 return 0;
1623#endif
1624}
1625
1626/*
1627 * vxge_hw_device_trace_level_get - Get the trace level
1628 * This routine returns the current trace level set
1629 */
1630u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1631{
1632#if defined(VXGE_DEBUG_TRACE_MASK)
1633 if (hldev == NULL)
1634 return VXGE_TRACE;
1635 else
1636 return hldev->level_trace;
1637#else
1638 return 0;
1639#endif
1640}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the pause frame generation and reception capability of the NIC.
 */
1646enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1647 u32 port, u32 *tx, u32 *rx)
1648{
1649 u64 val64;
1650 enum vxge_hw_status status = VXGE_HW_OK;
1651
1652 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1653 status = VXGE_HW_ERR_INVALID_DEVICE;
1654 goto exit;
1655 }
1656
1657 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1658 status = VXGE_HW_ERR_INVALID_PORT;
1659 goto exit;
1660 }
1661
1662 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1663 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1664 goto exit;
1665 }
1666
1667 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1668 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1669 *tx = 1;
1670 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1671 *rx = 1;
1672exit:
1673 return status;
1674}
1675
1676/*
1677 * vxge_hw_device_setpause_data - set/reset pause frame generation.
1678 * It can be used to set or reset Pause frame generation or reception
1679 * support of the NIC.
1680 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1682 u32 port, u32 tx, u32 rx)
1683{
1684 u64 val64;
1685 enum vxge_hw_status status = VXGE_HW_OK;
1686
1687 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1688 status = VXGE_HW_ERR_INVALID_DEVICE;
1689 goto exit;
1690 }
1691
1692 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1693 status = VXGE_HW_ERR_INVALID_PORT;
1694 goto exit;
1695 }
1696
	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
1700 goto exit;
1701
1702 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1703 if (tx)
1704 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1705 else
1706 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1707 if (rx)
1708 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1709 else
1710 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1711
1712 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1713exit:
1714 return status;
1715}
1716
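/* Usage sketch (illustrative; the pause-parameter glue shown here is assumed,
 * not part of this file): these two helpers pair naturally in flow-control
 * get/set handlers, e.g.:
 *
 *	u32 tx = 0, rx = 0;
 *
 *	vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
 *	...
 *	vxge_hw_device_setpause_data(hldev, 0, want_tx_pause, want_rx_pause);
 */
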
1717u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1718{
1719 int link_width, exp_cap;
1720 u16 lnk;
1721
1722 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
1723 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
1724 link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1725 return link_width;
1726}
1727
1728/*
1729 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1730 * This function returns the index of memory block
1731 */
1732static inline u32
1733__vxge_hw_ring_block_memblock_idx(u8 *block)
1734{
1735 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1736}
1737
1738/*
1739 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
1740 * This function sets index to a memory block
1741 */
1742static inline void
1743__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
1744{
1745 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
1746}
1747
1748/*
1749 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
1750 * in RxD block
1751 * Sets the next block pointer in RxD block
1752 */
1753static inline void
1754__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1755{
1756 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
1757}
1758
1759/*
1760 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
1761 * first block
1762 * Returns the dma address of the first RxD block
1763 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
1766 struct vxge_hw_mempool_dma *dma_object;
1767
1768 dma_object = ring->mempool->memblocks_dma_arr;
1769 vxge_assert(dma_object != NULL);
1770
1771 return dma_object->addr;
1772}
1773
1774/*
1775 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
1776 * This function returns the dma address of a given item
1777 */
1778static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
1779 void *item)
1780{
1781 u32 memblock_idx;
1782 void *memblock;
1783 struct vxge_hw_mempool_dma *memblock_dma_object;
1784 ptrdiff_t dma_item_offset;
1785
1786 /* get owner memblock index */
1787 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
1788
1789 /* get owner memblock by memblock index */
1790 memblock = mempoolh->memblocks_arr[memblock_idx];
1791
1792 /* get memblock DMA object by memblock index */
1793 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
1794
1795 /* calculate offset in the memblock of this item */
1796 dma_item_offset = (u8 *)item - (u8 *)memblock;
1797
1798 return memblock_dma_object->addr + dma_item_offset;
1799}
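
/*
 * Worked example of the address arithmetic above (values are illustrative
 * only): for a memblock whose DMA base is 0x10000000 and an item that
 * starts 512 bytes into that memblock,
 *
 *	dma_item_offset = (u8 *)item - (u8 *)memblock;         = 512
 *	memblock_dma_object->addr + dma_item_offset            = 0x10000200
 */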
1800
1801/*
1802 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
1803 * This function links the "from" RxD block to the "to" RxD block
1804 */
1805static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
1806 struct __vxge_hw_ring *ring, u32 from,
1807 u32 to)
1808{
1809 u8 *to_item , *from_item;
1810 dma_addr_t to_dma;
1811
1812 /* get "from" RxD block */
1813 from_item = mempoolh->items_arr[from];
1814 vxge_assert(from_item);
1815
1816 /* get "to" RxD block */
1817 to_item = mempoolh->items_arr[to];
1818 vxge_assert(to_item);
1819
1820 /* return address of the beginning of previous RxD block */
1821 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
1822
1823 /* set next pointer for this RxD block to point on
1824 * previous item's DMA start address */
1825 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
1826}
1827
1828/*
1829 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
1830 * block callback
1831 * This function is the callback passed to __vxge_hw_mempool_create to create the
1832 * memory pool for RxD blocks
1833 */
1834static void
1835__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1836 u32 memblock_index,
1837 struct vxge_hw_mempool_dma *dma_object,
1838 u32 index, u32 is_last)
1839{
1840 u32 i;
1841 void *item = mempoolh->items_arr[index];
1842 struct __vxge_hw_ring *ring =
1843 (struct __vxge_hw_ring *)mempoolh->userdata;
1844
1845 /* format rxds array */
1846 for (i = 0; i < ring->rxds_per_block; i++) {
1847 void *rxdblock_priv;
1848 void *uld_priv;
1849 struct vxge_hw_ring_rxd_1 *rxdp;
1850
1851 u32 reserve_index = ring->channel.reserve_ptr -
1852 (index * ring->rxds_per_block + i + 1);
1853 u32 memblock_item_idx;
1854
1855 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
1856 i * ring->rxd_size;
1857
1858 /* Note: memblock_item_idx is index of the item within
1859 * the memblock. For instance, in case of three RxD-blocks
1860 * per memblock this value can be 0, 1 or 2. */
1861 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
1862 memblock_index, item,
1863 &memblock_item_idx);
1864
1865 rxdp = (struct vxge_hw_ring_rxd_1 *)
1866 ring->channel.reserve_arr[reserve_index];
1867
1868 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
1869
1870 /* pre-format Host_Control */
1871 rxdp->host_control = (u64)(size_t)uld_priv;
1872 }
1873
1874 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
1875
1876 if (is_last) {
1877 /* link last one with first one */
1878 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
1879 }
1880
1881 if (index > 0) {
1882 /* link this RxD block with previous one */
1883 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1884 }
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001885}
1886
1887/*
Sreenivasa Honnur33632762010-03-28 22:08:30 +00001888 * vxge_hw_ring_replenish - Initial replenish of RxDs
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001889 * This function replenishes the RxDs from reserve array to work array
1890 */
1891enum vxge_hw_status
Sreenivasa Honnur33632762010-03-28 22:08:30 +00001892vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001893{
1894 void *rxd;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001895 struct __vxge_hw_channel *channel;
1896 enum vxge_hw_status status = VXGE_HW_OK;
1897
1898 channel = &ring->channel;
1899
1900 while (vxge_hw_channel_dtr_count(channel) > 0) {
1901
1902 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
1903
1904 vxge_assert(status == VXGE_HW_OK);
1905
1906 if (ring->rxd_init) {
1907 status = ring->rxd_init(rxd, channel->userdata);
1908 if (status != VXGE_HW_OK) {
1909 vxge_hw_ring_rxd_free(ring, rxd);
1910 goto exit;
1911 }
1912 }
1913
1914 vxge_hw_ring_rxd_post(ring, rxd);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001915 }
1916 status = VXGE_HW_OK;
1917exit:
1918 return status;
1919}
1920
1921/*
1922 * __vxge_hw_ring_create - Create a Ring
1923 * This function creates a Ring and initializes it.
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001924 */
stephen hemminger42821a52010-10-21 07:50:53 +00001925static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00001926__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1927 struct vxge_hw_ring_attr *attr)
1928{
1929 enum vxge_hw_status status = VXGE_HW_OK;
1930 struct __vxge_hw_ring *ring;
1931 u32 ring_length;
1932 struct vxge_hw_ring_config *config;
1933 struct __vxge_hw_device *hldev;
1934 u32 vp_id;
1935 struct vxge_hw_mempool_cbs ring_mp_callback;
1936
1937 if ((vp == NULL) || (attr == NULL)) {
1938 status = VXGE_HW_FAIL;
1939 goto exit;
1940 }
1941
1942 hldev = vp->vpath->hldev;
1943 vp_id = vp->vpath->vp_id;
1944
1945 config = &hldev->config.vp_config[vp_id].ring;
1946
1947 ring_length = config->ring_blocks *
1948 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1949
1950 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1951 VXGE_HW_CHANNEL_TYPE_RING,
1952 ring_length,
1953 attr->per_rxd_space,
1954 attr->userdata);
1955
1956 if (ring == NULL) {
1957 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1958 goto exit;
1959 }
1960
1961 vp->vpath->ringh = ring;
1962 ring->vp_id = vp_id;
1963 ring->vp_reg = vp->vpath->vp_reg;
1964 ring->common_reg = hldev->common_reg;
1965 ring->stats = &vp->vpath->sw_stats->ring_stats;
1966 ring->config = config;
1967 ring->callback = attr->callback;
1968 ring->rxd_init = attr->rxd_init;
1969 ring->rxd_term = attr->rxd_term;
1970 ring->buffer_mode = config->buffer_mode;
1971 ring->rxds_limit = config->rxds_limit;
1972
1973 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1974 ring->rxd_priv_size =
1975 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1976 ring->per_rxd_space = attr->per_rxd_space;
1977
1978 ring->rxd_priv_size =
1979 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1980 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1981
1982 /* how many RxDs can fit into one block. Depends on configured
1983 * buffer_mode. */
1984 ring->rxds_per_block =
1985 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1986
1987 /* calculate actual RxD block private size */
1988 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1989 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1990 ring->mempool = __vxge_hw_mempool_create(hldev,
1991 VXGE_HW_BLOCK_SIZE,
1992 VXGE_HW_BLOCK_SIZE,
1993 ring->rxdblock_priv_size,
1994 ring->config->ring_blocks,
1995 ring->config->ring_blocks,
1996 &ring_mp_callback,
1997 ring);
1998
1999 if (ring->mempool == NULL) {
2000 __vxge_hw_ring_delete(vp);
2001 return VXGE_HW_ERR_OUT_OF_MEMORY;
2002 }
2003
2004 status = __vxge_hw_channel_initialize(&ring->channel);
2005 if (status != VXGE_HW_OK) {
2006 __vxge_hw_ring_delete(vp);
2007 goto exit;
2008 }
2009
2010 /* Note:
2011 * Specifying rxd_init callback means two things:
2012 * 1) rxds need to be initialized by driver at channel-open time;
2013 * 2) rxds need to be posted at channel-open time
2014 * (that's what the vxge_hw_ring_replenish() call below does)
2015 * Currently we don't have a case when the 1) is done without the 2).
2016 */
2017 if (ring->rxd_init) {
Sreenivasa Honnur33632762010-03-28 22:08:30 +00002018 status = vxge_hw_ring_replenish(ring);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002019 if (status != VXGE_HW_OK) {
2020 __vxge_hw_ring_delete(vp);
2021 goto exit;
2022 }
2023 }
2024
2025 /* initial replenish will increment the counter in its post() routine,
2026 * we have to reset it */
2027 ring->stats->common_stats.usage_cnt = 0;
2028exit:
2029 return status;
2030}
2031
2032/*
2033 * __vxge_hw_ring_abort - Returns the RxD
2034 * This function terminates the posted RxDs of the ring
2035 */
stephen hemminger42821a52010-10-21 07:50:53 +00002036static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002037{
2038 void *rxdh;
2039 struct __vxge_hw_channel *channel;
2040
2041 channel = &ring->channel;
2042
2043 for (;;) {
2044 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
2045
2046 if (rxdh == NULL)
2047 break;
2048
2049 vxge_hw_channel_dtr_complete(channel);
2050
2051 if (ring->rxd_term)
2052 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2053 channel->userdata);
2054
2055 vxge_hw_channel_dtr_free(channel, rxdh);
2056 }
2057
2058 return VXGE_HW_OK;
2059}
2060
2061/*
2062 * __vxge_hw_ring_reset - Resets the ring
2063 * This function resets the ring during vpath reset operation
2064 */
stephen hemminger42821a52010-10-21 07:50:53 +00002065static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002066{
2067 enum vxge_hw_status status = VXGE_HW_OK;
2068 struct __vxge_hw_channel *channel;
2069
2070 channel = &ring->channel;
2071
2072 __vxge_hw_ring_abort(ring);
2073
2074 status = __vxge_hw_channel_reset(channel);
2075
2076 if (status != VXGE_HW_OK)
2077 goto exit;
2078
2079 if (ring->rxd_init) {
Sreenivasa Honnur33632762010-03-28 22:08:30 +00002080 status = vxge_hw_ring_replenish(ring);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002081 if (status != VXGE_HW_OK)
2082 goto exit;
2083 }
2084exit:
2085 return status;
2086}
2087
2088/*
2089 * __vxge_hw_ring_delete - Removes the ring
2090 * This function frees up the memory pool and removes the ring
2091 */
stephen hemminger42821a52010-10-21 07:50:53 +00002092static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002093{
2094 struct __vxge_hw_ring *ring = vp->vpath->ringh;
2095
2096 __vxge_hw_ring_abort(ring);
2097
2098 if (ring->mempool)
2099 __vxge_hw_mempool_destroy(ring->mempool);
2100
2101 vp->vpath->ringh = NULL;
2102 __vxge_hw_channel_free(&ring->channel);
2103
2104 return VXGE_HW_OK;
2105}
2106
2107/*
2108 * __vxge_hw_mempool_grow
2109 * Grows the mempool by up to %num_allocate memblocks.
2110 */
stephen hemminger42821a52010-10-21 07:50:53 +00002111static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002112__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2113 u32 *num_allocated)
2114{
2115 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
2116 u32 n_items = mempool->items_per_memblock;
2117 u32 start_block_idx = mempool->memblocks_allocated;
2118 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
2119 enum vxge_hw_status status = VXGE_HW_OK;
2120
2121 *num_allocated = 0;
2122
2123 if (end_block_idx > mempool->memblocks_max) {
2124 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2125 goto exit;
2126 }
2127
2128 for (i = start_block_idx; i < end_block_idx; i++) {
2129 u32 j;
2130 u32 is_last = ((end_block_idx - 1) == i);
2131 struct vxge_hw_mempool_dma *dma_object =
2132 mempool->memblocks_dma_arr + i;
2133 void *the_memblock;
2134
2135 /* allocate the memblock's private part. Each DMA memblock
2136 * carries space for per-item private data, as requested by the
2137 * mempool's user. Whenever the mempool grows, a new memblock
2138 * and its private part are allocated together, which keeps the
2139 * allocation overhead low. */
2140 mempool->memblocks_priv_arr[i] =
2141 vmalloc(mempool->items_priv_size * n_items);
2142 if (mempool->memblocks_priv_arr[i] == NULL) {
2143 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2144 goto exit;
2145 }
2146
2147 memset(mempool->memblocks_priv_arr[i], 0,
2148 mempool->items_priv_size * n_items);
2149
2150 /* allocate DMA-capable memblock */
2151 mempool->memblocks_arr[i] =
2152 __vxge_hw_blockpool_malloc(mempool->devh,
2153 mempool->memblock_size, dma_object);
2154 if (mempool->memblocks_arr[i] == NULL) {
2155 vfree(mempool->memblocks_priv_arr[i]);
2156 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2157 goto exit;
2158 }
2159
2160 (*num_allocated)++;
2161 mempool->memblocks_allocated++;
2162
2163 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
2164
2165 the_memblock = mempool->memblocks_arr[i];
2166
2167 /* fill the items hash array */
2168 for (j = 0; j < n_items; j++) {
2169 u32 index = i * n_items + j;
2170
2171 if (first_time && index >= mempool->items_initial)
2172 break;
2173
2174 mempool->items_arr[index] =
2175 ((char *)the_memblock + j*mempool->item_size);
2176
2177 /* let the caller do additional per-item setup */
2178 if (mempool->item_func_alloc != NULL)
2179 mempool->item_func_alloc(mempool, i,
2180 dma_object, index, is_last);
2181
2182 mempool->items_current = index + 1;
2183 }
2184
2185 if (first_time && mempool->items_current ==
2186 mempool->items_initial)
2187 break;
2188 }
2189exit:
2190 return status;
2191}
2192
2193/*
2194 * __vxge_hw_mempool_create
2195 * This function creates a memory pool object. The pool may grow but will
2196 * never shrink. It consists of a number of dynamically allocated blocks,
2197 * each large enough to hold %items_initial items. The memory is
2198 * DMA-able, but the client must map/unmap it before handing it to the device.
2199 */
stephen hemminger42821a52010-10-21 07:50:53 +00002200static struct vxge_hw_mempool*
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002201__vxge_hw_mempool_create(
2202 struct __vxge_hw_device *devh,
2203 u32 memblock_size,
2204 u32 item_size,
2205 u32 items_priv_size,
2206 u32 items_initial,
2207 u32 items_max,
2208 struct vxge_hw_mempool_cbs *mp_callback,
2209 void *userdata)
2210{
2211 enum vxge_hw_status status = VXGE_HW_OK;
2212 u32 memblocks_to_allocate;
2213 struct vxge_hw_mempool *mempool = NULL;
2214 u32 allocated;
2215
2216 if (memblock_size < item_size) {
2217 status = VXGE_HW_FAIL;
2218 goto exit;
2219 }
2220
2221 mempool = (struct vxge_hw_mempool *)
2222 vmalloc(sizeof(struct vxge_hw_mempool));
2223 if (mempool == NULL) {
2224 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2225 goto exit;
2226 }
2227 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
2228
2229 mempool->devh = devh;
2230 mempool->memblock_size = memblock_size;
2231 mempool->items_max = items_max;
2232 mempool->items_initial = items_initial;
2233 mempool->item_size = item_size;
2234 mempool->items_priv_size = items_priv_size;
2235 mempool->item_func_alloc = mp_callback->item_func_alloc;
2236 mempool->userdata = userdata;
2237
2238 mempool->memblocks_allocated = 0;
2239
2240 mempool->items_per_memblock = memblock_size / item_size;
2241
2242 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
2243 mempool->items_per_memblock;
2244
2245 /* allocate array of memblocks */
2246 mempool->memblocks_arr =
2247 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
2248 if (mempool->memblocks_arr == NULL) {
2249 __vxge_hw_mempool_destroy(mempool);
2250 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2251 mempool = NULL;
2252 goto exit;
2253 }
2254 memset(mempool->memblocks_arr, 0,
2255 sizeof(void *) * mempool->memblocks_max);
2256
2257 /* allocate array of private parts of items per memblocks */
2258 mempool->memblocks_priv_arr =
2259 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
2260 if (mempool->memblocks_priv_arr == NULL) {
2261 __vxge_hw_mempool_destroy(mempool);
2262 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2263 mempool = NULL;
2264 goto exit;
2265 }
2266 memset(mempool->memblocks_priv_arr, 0,
2267 sizeof(void *) * mempool->memblocks_max);
2268
2269 /* allocate array of memblocks DMA objects */
2270 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
2271 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
2272 mempool->memblocks_max);
2273
2274 if (mempool->memblocks_dma_arr == NULL) {
2275 __vxge_hw_mempool_destroy(mempool);
2276 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2277 mempool = NULL;
2278 goto exit;
2279 }
2280 memset(mempool->memblocks_dma_arr, 0,
2281 sizeof(struct vxge_hw_mempool_dma) *
2282 mempool->memblocks_max);
2283
2284 /* allocate hash array of items */
2285 mempool->items_arr =
2286 (void **) vmalloc(sizeof(void *) * mempool->items_max);
2287 if (mempool->items_arr == NULL) {
2288 __vxge_hw_mempool_destroy(mempool);
2289 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2290 mempool = NULL;
2291 goto exit;
2292 }
2293 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
2294
2295 /* calculate initial number of memblocks */
2296 memblocks_to_allocate = (mempool->items_initial +
2297 mempool->items_per_memblock - 1) /
2298 mempool->items_per_memblock;
2299
2300 /* pre-allocate the mempool */
2301 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
2302 &allocated);
2303 if (status != VXGE_HW_OK) {
2304 __vxge_hw_mempool_destroy(mempool);
2305 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2306 mempool = NULL;
2307 goto exit;
2308 }
2309
2310exit:
2311 return mempool;
2312}
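
/*
 * Worked example of the sizing arithmetic above (values are illustrative
 * only): with memblock_size = 8192, item_size = 2048 and items_max = 10,
 *
 *	items_per_memblock = 8192 / 2048          = 4
 *	memblocks_max      = (10 + 4 - 1) / 4     = 3
 *
 * so the pool reserves room for three memblocks and fills them on demand
 * through __vxge_hw_mempool_grow().
 */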
2313
2314/*
2315 * __vxge_hw_mempool_destroy
2316 */
stephen hemminger42821a52010-10-21 07:50:53 +00002317static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002318{
2319 u32 i, j;
2320 struct __vxge_hw_device *devh = mempool->devh;
2321
2322 for (i = 0; i < mempool->memblocks_allocated; i++) {
2323 struct vxge_hw_mempool_dma *dma_object;
2324
2325 vxge_assert(mempool->memblocks_arr[i]);
2326 vxge_assert(mempool->memblocks_dma_arr + i);
2327
2328 dma_object = mempool->memblocks_dma_arr + i;
2329
2330 for (j = 0; j < mempool->items_per_memblock; j++) {
2331 u32 index = i * mempool->items_per_memblock + j;
2332
2333 /* to skip last partially filled(if any) memblock */
2334 if (index >= mempool->items_current)
2335 break;
2336 }
2337
2338 vfree(mempool->memblocks_priv_arr[i]);
2339
2340 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2341 mempool->memblock_size, dma_object);
2342 }
2343
Figo.zhang50d36a92009-06-10 04:21:55 +00002344 vfree(mempool->items_arr);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002345
Figo.zhang50d36a92009-06-10 04:21:55 +00002346 vfree(mempool->memblocks_dma_arr);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002347
Figo.zhang50d36a92009-06-10 04:21:55 +00002348 vfree(mempool->memblocks_priv_arr);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002349
Figo.zhang50d36a92009-06-10 04:21:55 +00002350 vfree(mempool->memblocks_arr);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002351
2352 vfree(mempool);
2353}
2354
2355/*
2356 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
2357 * Check the fifo configuration
2358 */
2359enum vxge_hw_status
2360__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
2361{
2362 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
2363 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
2364 return VXGE_HW_BADCFG_FIFO_BLOCKS;
2365
2366 return VXGE_HW_OK;
2367}
2368
2369/*
2370 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
2371 * Check the vpath configuration
2372 */
stephen hemminger42821a52010-10-21 07:50:53 +00002373static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002374__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
2375{
2376 enum vxge_hw_status status;
2377
2378 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
2379 (vp_config->min_bandwidth >
2380 VXGE_HW_VPATH_BANDWIDTH_MAX))
2381 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
2382
2383 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
2384 if (status != VXGE_HW_OK)
2385 return status;
2386
2387 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
2388 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
2389 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
2390 return VXGE_HW_BADCFG_VPATH_MTU;
2391
2392 if ((vp_config->rpa_strip_vlan_tag !=
2393 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
2394 (vp_config->rpa_strip_vlan_tag !=
2395 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
2396 (vp_config->rpa_strip_vlan_tag !=
2397 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
2398 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
2399
2400 return VXGE_HW_OK;
2401}
2402
2403/*
2404 * __vxge_hw_device_config_check - Check device configuration.
2405 * Check the device configuration
2406 */
2407enum vxge_hw_status
2408__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
2409{
2410 u32 i;
2411 enum vxge_hw_status status;
2412
2413 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
2414 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
2415 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
2416 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
2417 return VXGE_HW_BADCFG_INTR_MODE;
2418
2419 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
2420 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
2421 return VXGE_HW_BADCFG_RTS_MAC_EN;
2422
2423 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2424 status = __vxge_hw_device_vpath_config_check(
2425 &new_config->vp_config[i]);
2426 if (status != VXGE_HW_OK)
2427 return status;
2428 }
2429
2430 return VXGE_HW_OK;
2431}
2432
2433/*
2434 * vxge_hw_device_config_default_get - Initialize device config with defaults.
2435 * Initialize Titan device config with default values.
2436 */
2437enum vxge_hw_status __devinit
2438vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2439{
2440 u32 i;
2441
2442 device_config->dma_blockpool_initial =
2443 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2444 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2445 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2446 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2447 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2448 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2449 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
2450
2451 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2452
2453 device_config->vp_config[i].vp_id = i;
2454
2455 device_config->vp_config[i].min_bandwidth =
2456 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2457
2458 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2459
2460 device_config->vp_config[i].ring.ring_blocks =
2461 VXGE_HW_DEF_RING_BLOCKS;
2462
2463 device_config->vp_config[i].ring.buffer_mode =
2464 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2465
2466 device_config->vp_config[i].ring.scatter_mode =
2467 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2468
2469 device_config->vp_config[i].ring.rxds_limit =
2470 VXGE_HW_DEF_RING_RXDS_LIMIT;
2471
2472 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2473
2474 device_config->vp_config[i].fifo.fifo_blocks =
2475 VXGE_HW_MIN_FIFO_BLOCKS;
2476
2477 device_config->vp_config[i].fifo.max_frags =
2478 VXGE_HW_MAX_FIFO_FRAGS;
2479
2480 device_config->vp_config[i].fifo.memblock_size =
2481 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2482
2483 device_config->vp_config[i].fifo.alignment_size =
2484 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2485
2486 device_config->vp_config[i].fifo.intr =
2487 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2488
2489 device_config->vp_config[i].fifo.no_snoop_bits =
2490 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2491 device_config->vp_config[i].tti.intr_enable =
2492 VXGE_HW_TIM_INTR_DEFAULT;
2493
2494 device_config->vp_config[i].tti.btimer_val =
2495 VXGE_HW_USE_FLASH_DEFAULT;
2496
2497 device_config->vp_config[i].tti.timer_ac_en =
2498 VXGE_HW_USE_FLASH_DEFAULT;
2499
2500 device_config->vp_config[i].tti.timer_ci_en =
2501 VXGE_HW_USE_FLASH_DEFAULT;
2502
2503 device_config->vp_config[i].tti.timer_ri_en =
2504 VXGE_HW_USE_FLASH_DEFAULT;
2505
2506 device_config->vp_config[i].tti.rtimer_val =
2507 VXGE_HW_USE_FLASH_DEFAULT;
2508
2509 device_config->vp_config[i].tti.util_sel =
2510 VXGE_HW_USE_FLASH_DEFAULT;
2511
2512 device_config->vp_config[i].tti.ltimer_val =
2513 VXGE_HW_USE_FLASH_DEFAULT;
2514
2515 device_config->vp_config[i].tti.urange_a =
2516 VXGE_HW_USE_FLASH_DEFAULT;
2517
2518 device_config->vp_config[i].tti.uec_a =
2519 VXGE_HW_USE_FLASH_DEFAULT;
2520
2521 device_config->vp_config[i].tti.urange_b =
2522 VXGE_HW_USE_FLASH_DEFAULT;
2523
2524 device_config->vp_config[i].tti.uec_b =
2525 VXGE_HW_USE_FLASH_DEFAULT;
2526
2527 device_config->vp_config[i].tti.urange_c =
2528 VXGE_HW_USE_FLASH_DEFAULT;
2529
2530 device_config->vp_config[i].tti.uec_c =
2531 VXGE_HW_USE_FLASH_DEFAULT;
2532
2533 device_config->vp_config[i].tti.uec_d =
2534 VXGE_HW_USE_FLASH_DEFAULT;
2535
2536 device_config->vp_config[i].rti.intr_enable =
2537 VXGE_HW_TIM_INTR_DEFAULT;
2538
2539 device_config->vp_config[i].rti.btimer_val =
2540 VXGE_HW_USE_FLASH_DEFAULT;
2541
2542 device_config->vp_config[i].rti.timer_ac_en =
2543 VXGE_HW_USE_FLASH_DEFAULT;
2544
2545 device_config->vp_config[i].rti.timer_ci_en =
2546 VXGE_HW_USE_FLASH_DEFAULT;
2547
2548 device_config->vp_config[i].rti.timer_ri_en =
2549 VXGE_HW_USE_FLASH_DEFAULT;
2550
2551 device_config->vp_config[i].rti.rtimer_val =
2552 VXGE_HW_USE_FLASH_DEFAULT;
2553
2554 device_config->vp_config[i].rti.util_sel =
2555 VXGE_HW_USE_FLASH_DEFAULT;
2556
2557 device_config->vp_config[i].rti.ltimer_val =
2558 VXGE_HW_USE_FLASH_DEFAULT;
2559
2560 device_config->vp_config[i].rti.urange_a =
2561 VXGE_HW_USE_FLASH_DEFAULT;
2562
2563 device_config->vp_config[i].rti.uec_a =
2564 VXGE_HW_USE_FLASH_DEFAULT;
2565
2566 device_config->vp_config[i].rti.urange_b =
2567 VXGE_HW_USE_FLASH_DEFAULT;
2568
2569 device_config->vp_config[i].rti.uec_b =
2570 VXGE_HW_USE_FLASH_DEFAULT;
2571
2572 device_config->vp_config[i].rti.urange_c =
2573 VXGE_HW_USE_FLASH_DEFAULT;
2574
2575 device_config->vp_config[i].rti.uec_c =
2576 VXGE_HW_USE_FLASH_DEFAULT;
2577
2578 device_config->vp_config[i].rti.uec_d =
2579 VXGE_HW_USE_FLASH_DEFAULT;
2580
2581 device_config->vp_config[i].mtu =
2582 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2583
2584 device_config->vp_config[i].rpa_strip_vlan_tag =
2585 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2586 }
2587
2588 return VXGE_HW_OK;
2589}
2590
2591/*
2592 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2593 * Set the swapper bits appropriately for the legacy section.
2594 */
stephen hemminger42821a52010-10-21 07:50:53 +00002595static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002596__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2597{
2598 u64 val64;
2599 enum vxge_hw_status status = VXGE_HW_OK;
2600
2601 val64 = readq(&legacy_reg->toc_swapper_fb);
2602
2603 wmb();
2604
2605 switch (val64) {
2606
2607 case VXGE_HW_SWAPPER_INITIAL_VALUE:
2608 return status;
2609
2610 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2611 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2612 &legacy_reg->pifm_rd_swap_en);
2613 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2614 &legacy_reg->pifm_rd_flip_en);
2615 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2616 &legacy_reg->pifm_wr_swap_en);
2617 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2618 &legacy_reg->pifm_wr_flip_en);
2619 break;
2620
2621 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2622 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2623 &legacy_reg->pifm_rd_swap_en);
2624 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2625 &legacy_reg->pifm_wr_swap_en);
2626 break;
2627
2628 case VXGE_HW_SWAPPER_BIT_FLIPPED:
2629 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2630 &legacy_reg->pifm_rd_flip_en);
2631 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2632 &legacy_reg->pifm_wr_flip_en);
2633 break;
2634 }
2635
2636 wmb();
2637
2638 val64 = readq(&legacy_reg->toc_swapper_fb);
2639
2640 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2641 status = VXGE_HW_ERR_SWAPPER_CTRL;
2642
2643 return status;
2644}
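
/*
 * Example of the detection above (illustrative; the pattern value is an
 * assumption, not taken from this file): the adapter exposes a fixed
 * 64-bit pattern in toc_swapper_fb. If a pattern such as
 * 0x0123456789abcdef reads back byte-swapped as 0xefcdab8967452301, only
 * the byte-swap enables are written; bit-flipped readings enable the
 * corresponding flip bits instead, and a correct reading needs no change.
 */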
2645
2646/*
2647 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2648 * Set the swapper bits appropriately for the vpath.
2649 */
stephen hemminger42821a52010-10-21 07:50:53 +00002650static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002651__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2652{
2653#ifndef __BIG_ENDIAN
2654 u64 val64;
2655
2656 val64 = readq(&vpath_reg->vpath_general_cfg1);
2657 wmb();
2658 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2659 writeq(val64, &vpath_reg->vpath_general_cfg1);
2660 wmb();
2661#endif
2662 return VXGE_HW_OK;
2663}
2664
2665/*
2666 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2667 * Set the KDFC swapper bits appropriately for the vpath.
2668 */
stephen hemminger42821a52010-10-21 07:50:53 +00002669static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002670__vxge_hw_kdfc_swapper_set(
2671 struct vxge_hw_legacy_reg __iomem *legacy_reg,
2672 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2673{
2674 u64 val64;
2675
2676 val64 = readq(&legacy_reg->pifm_wr_swap_en);
2677
2678 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2679 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2680 wmb();
2681
2682 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2683 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
2684 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2685
2686 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2687 wmb();
2688 }
2689
2690 return VXGE_HW_OK;
2691}
2692
2693/*
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002694 * vxge_hw_mgmt_reg_read - Read Titan register.
2695 */
2696enum vxge_hw_status
2697vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2698 enum vxge_hw_mgmt_reg_type type,
2699 u32 index, u32 offset, u64 *value)
2700{
2701 enum vxge_hw_status status = VXGE_HW_OK;
2702
2703 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2704 status = VXGE_HW_ERR_INVALID_DEVICE;
2705 goto exit;
2706 }
2707
2708 switch (type) {
2709 case vxge_hw_mgmt_reg_type_legacy:
2710 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2711 status = VXGE_HW_ERR_INVALID_OFFSET;
2712 break;
2713 }
2714 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2715 break;
2716 case vxge_hw_mgmt_reg_type_toc:
2717 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2718 status = VXGE_HW_ERR_INVALID_OFFSET;
2719 break;
2720 }
2721 *value = readq((void __iomem *)hldev->toc_reg + offset);
2722 break;
2723 case vxge_hw_mgmt_reg_type_common:
2724 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2725 status = VXGE_HW_ERR_INVALID_OFFSET;
2726 break;
2727 }
2728 *value = readq((void __iomem *)hldev->common_reg + offset);
2729 break;
2730 case vxge_hw_mgmt_reg_type_mrpcim:
2731 if (!(hldev->access_rights &
2732 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2733 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2734 break;
2735 }
2736 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2737 status = VXGE_HW_ERR_INVALID_OFFSET;
2738 break;
2739 }
2740 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2741 break;
2742 case vxge_hw_mgmt_reg_type_srpcim:
2743 if (!(hldev->access_rights &
2744 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2745 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2746 break;
2747 }
2748 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2749 status = VXGE_HW_ERR_INVALID_INDEX;
2750 break;
2751 }
2752 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2753 status = VXGE_HW_ERR_INVALID_OFFSET;
2754 break;
2755 }
2756 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2757 offset);
2758 break;
2759 case vxge_hw_mgmt_reg_type_vpmgmt:
2760 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2761 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2762 status = VXGE_HW_ERR_INVALID_INDEX;
2763 break;
2764 }
2765 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2766 status = VXGE_HW_ERR_INVALID_OFFSET;
2767 break;
2768 }
2769 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2770 offset);
2771 break;
2772 case vxge_hw_mgmt_reg_type_vpath:
2773 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2774 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2775 status = VXGE_HW_ERR_INVALID_INDEX;
2776 break;
2777 }
2778 if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
2779 status = VXGE_HW_ERR_INVALID_INDEX;
2780 break;
2781 }
2782 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2783 status = VXGE_HW_ERR_INVALID_OFFSET;
2784 break;
2785 }
2786 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2787 offset);
2788 break;
2789 default:
2790 status = VXGE_HW_ERR_INVALID_TYPE;
2791 break;
2792 }
2793
2794exit:
2795 return status;
2796}
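
/*
 * Example usage (illustrative sketch; the offset value is an assumption):
 *
 *	u64 val = 0;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				       0, 0x0, &val);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *
 * The index argument is only meaningful for the srpcim, vpmgmt and vpath
 * register spaces; the legacy, toc, common and mrpcim cases ignore it.
 */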
2797
2798/*
Sreenivasa Honnurfa41fd12009-10-05 01:56:35 +00002799 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2800 */
2801enum vxge_hw_status
2802vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2803{
2804 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
2805 enum vxge_hw_status status = VXGE_HW_OK;
2806 int i = 0, j = 0;
2807
2808 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2809 if (!((vpath_mask) & vxge_mBIT(i)))
2810 continue;
2811 vpmgmt_reg = hldev->vpmgmt_reg[i];
2812 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2813 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2814 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2815 return VXGE_HW_FAIL;
2816 }
2817 }
2818 return status;
2819}
2820/*
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002821 * vxge_hw_mgmt_reg_write - Write Titan register.
2822 */
2823enum vxge_hw_status
2824vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2825 enum vxge_hw_mgmt_reg_type type,
2826 u32 index, u32 offset, u64 value)
2827{
2828 enum vxge_hw_status status = VXGE_HW_OK;
2829
2830 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2831 status = VXGE_HW_ERR_INVALID_DEVICE;
2832 goto exit;
2833 }
2834
2835 switch (type) {
2836 case vxge_hw_mgmt_reg_type_legacy:
2837 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2838 status = VXGE_HW_ERR_INVALID_OFFSET;
2839 break;
2840 }
2841 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2842 break;
2843 case vxge_hw_mgmt_reg_type_toc:
2844 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2845 status = VXGE_HW_ERR_INVALID_OFFSET;
2846 break;
2847 }
2848 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2849 break;
2850 case vxge_hw_mgmt_reg_type_common:
2851 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2852 status = VXGE_HW_ERR_INVALID_OFFSET;
2853 break;
2854 }
2855 writeq(value, (void __iomem *)hldev->common_reg + offset);
2856 break;
2857 case vxge_hw_mgmt_reg_type_mrpcim:
2858 if (!(hldev->access_rights &
2859 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2860 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2861 break;
2862 }
2863 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2864 status = VXGE_HW_ERR_INVALID_OFFSET;
2865 break;
2866 }
2867 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2868 break;
2869 case vxge_hw_mgmt_reg_type_srpcim:
2870 if (!(hldev->access_rights &
2871 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2872 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2873 break;
2874 }
2875 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2876 status = VXGE_HW_ERR_INVALID_INDEX;
2877 break;
2878 }
2879 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2880 status = VXGE_HW_ERR_INVALID_OFFSET;
2881 break;
2882 }
2883 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2884 offset);
2885
2886 break;
2887 case vxge_hw_mgmt_reg_type_vpmgmt:
2888 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2889 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2890 status = VXGE_HW_ERR_INVALID_INDEX;
2891 break;
2892 }
2893 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2894 status = VXGE_HW_ERR_INVALID_OFFSET;
2895 break;
2896 }
2897 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2898 offset);
2899 break;
2900 case vxge_hw_mgmt_reg_type_vpath:
2901 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2902 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2903 status = VXGE_HW_ERR_INVALID_INDEX;
2904 break;
2905 }
2906 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2907 status = VXGE_HW_ERR_INVALID_OFFSET;
2908 break;
2909 }
2910 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2911 offset);
2912 break;
2913 default:
2914 status = VXGE_HW_ERR_INVALID_TYPE;
2915 break;
2916 }
2917exit:
2918 return status;
2919}
2920
2921/*
2922 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2923 * list callback
2924 * This function is the callback passed to __vxge_hw_mempool_create to create the
2925 * memory pool for the TxD list
2926 */
2927static void
2928__vxge_hw_fifo_mempool_item_alloc(
2929 struct vxge_hw_mempool *mempoolh,
2930 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2931 u32 index, u32 is_last)
2932{
2933 u32 memblock_item_idx;
2934 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2935 struct vxge_hw_fifo_txd *txdp =
2936 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2937 struct __vxge_hw_fifo *fifo =
2938 (struct __vxge_hw_fifo *)mempoolh->userdata;
2939 void *memblock = mempoolh->memblocks_arr[memblock_index];
2940
2941 vxge_assert(txdp);
2942
2943 txdp->host_control = (u64) (size_t)
2944 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2945 &memblock_item_idx);
2946
2947 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2948
2949 vxge_assert(txdl_priv);
2950
2951 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2952
2953 /* pre-format HW's TxDL's private */
2954 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2955 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2956 txdl_priv->dma_handle = dma_object->handle;
2957 txdl_priv->memblock = memblock;
2958 txdl_priv->first_txdp = txdp;
2959 txdl_priv->next_txdl_priv = NULL;
2960 txdl_priv->alloc_frags = 0;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00002961}
2962
2963/*
2964 * __vxge_hw_fifo_create - Create a FIFO
2965 * This function creates a FIFO and initializes it.
2966 */
2967enum vxge_hw_status
2968__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2969 struct vxge_hw_fifo_attr *attr)
2970{
2971 enum vxge_hw_status status = VXGE_HW_OK;
2972 struct __vxge_hw_fifo *fifo;
2973 struct vxge_hw_fifo_config *config;
2974 u32 txdl_size, txdl_per_memblock;
2975 struct vxge_hw_mempool_cbs fifo_mp_callback;
2976 struct __vxge_hw_virtualpath *vpath;
2977
2978 if ((vp == NULL) || (attr == NULL)) {
2979 status = VXGE_HW_ERR_INVALID_HANDLE;
2980 goto exit;
2981 }
2982 vpath = vp->vpath;
2983 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2984
2985 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2986
2987 txdl_per_memblock = config->memblock_size / txdl_size;
2988
2989 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2990 VXGE_HW_CHANNEL_TYPE_FIFO,
2991 config->fifo_blocks * txdl_per_memblock,
2992 attr->per_txdl_space, attr->userdata);
2993
2994 if (fifo == NULL) {
2995 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2996 goto exit;
2997 }
2998
2999 vpath->fifoh = fifo;
3000 fifo->nofl_db = vpath->nofl_db;
3001
3002 fifo->vp_id = vpath->vp_id;
3003 fifo->vp_reg = vpath->vp_reg;
3004 fifo->stats = &vpath->sw_stats->fifo_stats;
3005
3006 fifo->config = config;
3007
3008 /* apply "interrupts per txdl" attribute */
3009 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3010
3011 if (fifo->config->intr)
3012 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
3013
3014 fifo->no_snoop_bits = config->no_snoop_bits;
3015
3016 /*
3017 * FIFO memory management strategy:
3018 *
3019 * TxDL split into three independent parts:
3020 * - set of TxD's
3021 * - TxD HW private part
3022 * - driver private part
3023 *
3024 * Adaptive memory allocation is used, i.e. memory is allocated on
3025 * demand and sized to fit into one memory block.
3026 * One memory block may contain more than one TxDL.
3027 *
3028 * During "reserve" operations more memory can be allocated on demand
3029 * for example due to FIFO full condition.
3030 *
3031 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
3032 * routine which will essentially stop the channel and free resources.
3033 */
3034
3035 /* TxDL common private size == TxDL private + driver private */
3036 fifo->priv_size =
3037 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
3038 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
3039 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
3040
3041 fifo->per_txdl_space = attr->per_txdl_space;
3042
3043 /* store the TxDL size and the number of TxDLs per memblock */
3044 fifo->txdl_size = txdl_size;
3045 fifo->txdl_per_memblock = txdl_per_memblock;
3046
3047 fifo->txdl_term = attr->txdl_term;
3048 fifo->callback = attr->callback;
3049
3050 if (fifo->txdl_per_memblock == 0) {
3051 __vxge_hw_fifo_delete(vp);
3052 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
3053 goto exit;
3054 }
3055
3056 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
3057
3058 fifo->mempool =
3059 __vxge_hw_mempool_create(vpath->hldev,
3060 fifo->config->memblock_size,
3061 fifo->txdl_size,
3062 fifo->priv_size,
3063 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3064 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3065 &fifo_mp_callback,
3066 fifo);
3067
3068 if (fifo->mempool == NULL) {
3069 __vxge_hw_fifo_delete(vp);
3070 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3071 goto exit;
3072 }
3073
3074 status = __vxge_hw_channel_initialize(&fifo->channel);
3075 if (status != VXGE_HW_OK) {
3076 __vxge_hw_fifo_delete(vp);
3077 goto exit;
3078 }
3079
3080 vxge_assert(fifo->channel.reserve_ptr);
3081exit:
3082 return status;
3083}
3084
3085/*
3086 * __vxge_hw_fifo_abort - Returns the TxD
3087 * This function terminates the TxDs of the fifo
3088 */
stephen hemminger42821a52010-10-21 07:50:53 +00003089static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003090{
3091 void *txdlh;
3092
3093 for (;;) {
3094 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3095
3096 if (txdlh == NULL)
3097 break;
3098
3099 vxge_hw_channel_dtr_complete(&fifo->channel);
3100
3101 if (fifo->txdl_term) {
3102 fifo->txdl_term(txdlh,
3103 VXGE_HW_TXDL_STATE_POSTED,
3104 fifo->channel.userdata);
3105 }
3106
3107 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3108 }
3109
3110 return VXGE_HW_OK;
3111}
3112
3113/*
3114 * __vxge_hw_fifo_reset - Resets the fifo
3115 * This function resets the fifo during vpath reset operation
3116 */
stephen hemminger42821a52010-10-21 07:50:53 +00003117static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003118{
3119 enum vxge_hw_status status = VXGE_HW_OK;
3120
3121 __vxge_hw_fifo_abort(fifo);
3122 status = __vxge_hw_channel_reset(&fifo->channel);
3123
3124 return status;
3125}
3126
3127/*
3128 * __vxge_hw_fifo_delete - Removes the FIFO
3129 * This function frees up the memory pool and removes the FIFO
3130 */
3131enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3132{
3133 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3134
3135 __vxge_hw_fifo_abort(fifo);
3136
3137 if (fifo->mempool)
3138 __vxge_hw_mempool_destroy(fifo->mempool);
3139
3140 vp->vpath->fifoh = NULL;
3141
3142 __vxge_hw_channel_free(&fifo->channel);
3143
3144 return VXGE_HW_OK;
3145}
3146
3147/*
3148 * __vxge_hw_vpath_pci_read - Read the content of given address
3149 * in pci config space.
3150 * Read from the vpath pci config space.
3151 */
stephen hemminger42821a52010-10-21 07:50:53 +00003152static enum vxge_hw_status
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003153__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
3154 u32 phy_func_0, u32 offset, u32 *val)
3155{
3156 u64 val64;
3157 enum vxge_hw_status status = VXGE_HW_OK;
3158 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
3159
3160 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
3161
3162 if (phy_func_0)
3163 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
3164
3165 writeq(val64, &vp_reg->pci_config_access_cfg1);
3166 wmb();
3167 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
3168 &vp_reg->pci_config_access_cfg2);
3169 wmb();
3170
3171 status = __vxge_hw_device_register_poll(
3172 &vp_reg->pci_config_access_cfg2,
3173 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3174
3175 if (status != VXGE_HW_OK)
3176 goto exit;
3177
3178 val64 = readq(&vp_reg->pci_config_access_status);
3179
3180 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
3181 status = VXGE_HW_FAIL;
3182 *val = 0;
3183 } else
3184 *val = (u32)vxge_bVALn(val64, 32, 32);
3185exit:
3186 return status;
3187}
3188
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003189/**
3190 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3191 * @hldev: HW device.
3192 * @on_off: TRUE if flickering to be on, FALSE to be off
3193 *
3194 * Flicker the link LED.
3195 */
3196enum vxge_hw_status
Jon Mason8424e002010-11-11 04:25:56 +00003197vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003198{
Jon Mason8424e002010-11-11 04:25:56 +00003199 struct __vxge_hw_virtualpath *vpath;
3200 u64 data0, data1 = 0, steer_ctrl = 0;
3201 enum vxge_hw_status status;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003202
3203 if (hldev == NULL) {
3204 status = VXGE_HW_ERR_INVALID_DEVICE;
3205 goto exit;
3206 }
3207
Jon Mason8424e002010-11-11 04:25:56 +00003208 vpath = &hldev->virtual_paths[hldev->first_vp_id];
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003209
Jon Mason8424e002010-11-11 04:25:56 +00003210 data0 = on_off;
3211 status = vxge_hw_vpath_fw_api(vpath,
3212 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3213 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3214 0, &data0, &data1, &steer_ctrl);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003215exit:
3216 return status;
3217}
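
/*
 * Example usage (illustrative sketch): an identify-LED handler could start
 * and stop blinking with on_off values of 1 and 0. The exact values the
 * firmware expects are an assumption here, not taken from this file.
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);	start blinking
 *	...
 *	vxge_hw_device_flick_link_led(hldev, 0);	stop blinking
 */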
3218
3219/*
3220 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3221 */
3222enum vxge_hw_status
Jon Mason8424e002010-11-11 04:25:56 +00003223__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3224 u32 action, u32 rts_table, u32 offset,
3225 u64 *data0, u64 *data1)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003226{
Jon Mason8424e002010-11-11 04:25:56 +00003227 enum vxge_hw_status status;
3228 u64 steer_ctrl = 0;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003229
3230 if (vp == NULL) {
3231 status = VXGE_HW_ERR_INVALID_HANDLE;
3232 goto exit;
3233 }
3234
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003235 if ((rts_table ==
Jon Mason8424e002010-11-11 04:25:56 +00003236 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003237 (rts_table ==
Jon Mason8424e002010-11-11 04:25:56 +00003238 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003239 (rts_table ==
Jon Mason8424e002010-11-11 04:25:56 +00003240 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003241 (rts_table ==
Jon Mason8424e002010-11-11 04:25:56 +00003242 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3243 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003244 }
3245
Jon Mason8424e002010-11-11 04:25:56 +00003246 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3247 data0, data1, &steer_ctrl);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003248 if (status != VXGE_HW_OK)
3249 goto exit;
3250
Jon Mason8424e002010-11-11 04:25:56 +00003251	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3252 (rts_table !=
3253 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3254 *data1 = 0;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003255exit:
3256 return status;
3257}
3258
3259/*
3260 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3261 */
3262enum vxge_hw_status
Jon Mason8424e002010-11-11 04:25:56 +00003263__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3264 u32 rts_table, u32 offset, u64 steer_data0,
3265 u64 steer_data1)
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003266{
Jon Mason8424e002010-11-11 04:25:56 +00003267 u64 data0, data1 = 0, steer_ctrl = 0;
3268 enum vxge_hw_status status;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003269
3270 if (vp == NULL) {
3271 status = VXGE_HW_ERR_INVALID_HANDLE;
3272 goto exit;
3273 }
3274
Jon Mason8424e002010-11-11 04:25:56 +00003275 data0 = steer_data0;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003276
3277 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3278 (rts_table ==
Jon Mason8424e002010-11-11 04:25:56 +00003279 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3280 data1 = steer_data1;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003281
Jon Mason8424e002010-11-11 04:25:56 +00003282 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3283 &data0, &data1, &steer_ctrl);
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003284exit:
3285 return status;
3286}
3287
3288/*
3289 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3290 */
3291enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3292 struct __vxge_hw_vpath_handle *vp,
3293 enum vxge_hw_rth_algoritms algorithm,
3294 struct vxge_hw_rth_hash_types *hash_type,
3295 u16 bucket_size)
3296{
3297 u64 data0, data1;
3298 enum vxge_hw_status status = VXGE_HW_OK;
3299
3300 if (vp == NULL) {
3301 status = VXGE_HW_ERR_INVALID_HANDLE;
3302 goto exit;
3303 }
3304
3305 status = __vxge_hw_vpath_rts_table_get(vp,
3306 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3307 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3308 0, &data0, &data1);
Jon Mason47f01db2010-11-11 04:25:53 +00003309 if (status != VXGE_HW_OK)
3310 goto exit;
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003311
3312 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3313 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3314
3315 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3316 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3317 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3318
3319 if (hash_type->hash_type_tcpipv4_en)
3320 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3321
3322 if (hash_type->hash_type_ipv4_en)
3323 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3324
3325 if (hash_type->hash_type_tcpipv6_en)
3326 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3327
3328 if (hash_type->hash_type_ipv6_en)
3329 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3330
3331 if (hash_type->hash_type_tcpipv6ex_en)
3332 data0 |=
3333 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3334
3335 if (hash_type->hash_type_ipv6ex_en)
3336 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3337
3338 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3339 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3340 else
3341 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3342
3343 status = __vxge_hw_vpath_rts_table_set(vp,
3344 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3345 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3346 0, data0, 0);
3347exit:
3348 return status;
3349}
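
/*
 * Example usage (illustrative sketch; the algorithm and bucket_size values
 * are assumptions): enable RTH with TCP/IPv4 hashing on a vpath handle,
 * where algorithm is one of enum vxge_hw_rth_algoritms.
 *
 *	struct vxge_hw_rth_hash_types hash_type = {0};
 *	enum vxge_hw_status status;
 *
 *	hash_type.hash_type_tcpipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, algorithm, &hash_type, 8);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */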
3350
3351static void
3352vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3353 u16 flag, u8 *itable)
3354{
3355 switch (flag) {
3356 case 1:
3357 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3358 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3359 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3360				itable[j]);
		break;
3361 case 2:
3362 *data0 |=
3363 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3364 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3365 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3366				itable[j]);
		break;
3367 case 3:
3368 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3369 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3370 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3371				itable[j]);
		break;
3372 case 4:
3373 *data1 |=
3374 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3375 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3376 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3377				itable[j]);
		break;
3378 default:
3379 return;
3380 }
3381}
3382/*
3383 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3384 */
3385enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3386 struct __vxge_hw_vpath_handle **vpath_handles,
3387 u32 vpath_count,
3388 u8 *mtable,
3389 u8 *itable,
3390 u32 itable_size)
3391{
3392 u32 i, j, action, rts_table;
3393 u64 data0;
3394 u64 data1;
3395 u32 max_entries;
3396 enum vxge_hw_status status = VXGE_HW_OK;
3397 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3398
3399 if (vp == NULL) {
3400 status = VXGE_HW_ERR_INVALID_HANDLE;
3401 goto exit;
3402 }
3403
3404 max_entries = (((u32)1) << itable_size);
3405
3406 if (vp->vpath->hldev->config.rth_it_type
3407 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3408 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3409 rts_table =
3410 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3411
3412 for (j = 0; j < max_entries; j++) {
3413
3414 data1 = 0;
3415
3416 data0 =
3417 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3418 itable[j]);
3419
3420 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3421 action, rts_table, j, data0, data1);
3422
3423 if (status != VXGE_HW_OK)
3424 goto exit;
3425 }
3426
3427 for (j = 0; j < max_entries; j++) {
3428
3429 data1 = 0;
3430
3431 data0 =
3432 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3433 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3434 itable[j]);
3435
3436 status = __vxge_hw_vpath_rts_table_set(
3437 vpath_handles[mtable[itable[j]]], action,
3438 rts_table, j, data0, data1);
3439
3440 if (status != VXGE_HW_OK)
3441 goto exit;
3442 }
3443 } else {
3444 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3445 rts_table =
3446 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3447 for (i = 0; i < vpath_count; i++) {
3448
3449 for (j = 0; j < max_entries;) {
3450
3451 data0 = 0;
3452 data1 = 0;
3453
3454 while (j < max_entries) {
3455 if (mtable[itable[j]] != i) {
3456 j++;
3457 continue;
3458 }
3459 vxge_hw_rts_rth_data0_data1_get(j,
3460 &data0, &data1, 1, itable);
3461 j++;
3462 break;
3463 }
3464
3465 while (j < max_entries) {
3466 if (mtable[itable[j]] != i) {
3467 j++;
3468 continue;
3469 }
3470 vxge_hw_rts_rth_data0_data1_get(j,
3471 &data0, &data1, 2, itable);
3472 j++;
3473 break;
3474 }
3475
3476 while (j < max_entries) {
3477 if (mtable[itable[j]] != i) {
3478 j++;
3479 continue;
3480 }
3481 vxge_hw_rts_rth_data0_data1_get(j,
3482 &data0, &data1, 3, itable);
3483 j++;
3484 break;
3485 }
3486
3487 while (j < max_entries) {
3488 if (mtable[itable[j]] != i) {
3489 j++;
3490 continue;
3491 }
3492 vxge_hw_rts_rth_data0_data1_get(j,
3493 &data0, &data1, 4, itable);
3494 j++;
3495 break;
3496 }
3497
3498 if (data0 != 0) {
3499 status = __vxge_hw_vpath_rts_table_set(
3500 vpath_handles[i],
3501 action, rts_table,
3502 0, data0, data1);
3503
3504 if (status != VXGE_HW_OK)
3505 goto exit;
3506 }
3507 }
3508 }
3509 }
3510exit:
3511 return status;
3512}
3513
3514/**
3515 * vxge_hw_vpath_check_leak - Check for memory leak
3516 * @ring: Handle to the ring object used for receive
3517 *
3518 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
3519 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3520 * Returns: VXGE_HW_FAIL, if leak has occurred.
3521 *
3522 */
3523enum vxge_hw_status
3524vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3525{
3526 enum vxge_hw_status status = VXGE_HW_OK;
3527 u64 rxd_new_count, rxd_spat;
3528
3529 if (ring == NULL)
3530 return status;
3531
3532 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3533 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3534 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3535
3536 if (rxd_new_count >= rxd_spat)
3537 status = VXGE_HW_FAIL;
3538
3539 return status;
3540}
3541
3542/*
3543 * __vxge_hw_vpath_mgmt_read
3544 * This routine reads the vpath_mgmt registers
3545 */
3546static enum vxge_hw_status
3547__vxge_hw_vpath_mgmt_read(
3548 struct __vxge_hw_device *hldev,
3549 struct __vxge_hw_virtualpath *vpath)
3550{
3551 u32 i, mtu = 0, max_pyld = 0;
3552 u64 val64;
3553 enum vxge_hw_status status = VXGE_HW_OK;
3554
3555 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3556
3557 val64 = readq(&vpath->vpmgmt_reg->
3558 rxmac_cfg0_port_vpmgmt_clone[i]);
3559 max_pyld =
3560 (u32)
3561 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3562 (val64);
3563 if (mtu < max_pyld)
3564 mtu = max_pyld;
3565 }
3566
3567 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3568
3569 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3570
3571 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3572 if (val64 & vxge_mBIT(i))
3573 vpath->vsport_number = i;
3574 }
3575
3576 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3577
3578 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3579 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3580 else
3581 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3582
3583 return status;
3584}
3585
3586/*
3587 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3588 * This routine checks the vpath_rst_in_prog register to see if
3589 * the adapter has completed the reset process for the vpath
3590 */
3591static enum vxge_hw_status
3592__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3593{
3594 enum vxge_hw_status status;
3595
3596 status = __vxge_hw_device_register_poll(
3597 &vpath->hldev->common_reg->vpath_rst_in_prog,
3598 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3599 1 << (16 - vpath->vp_id)),
3600 vpath->hldev->config.device_poll_millis);
3601
3602 return status;
3603}
3604
3605/*
3606 * __vxge_hw_vpath_reset
3607 * This routine resets the vpath on the device
3608 */
3609static enum vxge_hw_status
3610__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3611{
3612 u64 val64;
3613 enum vxge_hw_status status = VXGE_HW_OK;
3614
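	/*
	 * The per-vpath reset bits of cmn_rsthdlr_cfg0 are laid out from the
	 * most significant end of the field, so vpath n is requested with
	 * bit (16 - n).
	 */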
3615 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3616
3617 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3618 &hldev->common_reg->cmn_rsthdlr_cfg0);
3619
3620 return status;
3621}
3622
3623/*
3624 * __vxge_hw_vpath_sw_reset
3625 * This routine resets the vpath structures
3626 */
3627static enum vxge_hw_status
3628__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3629{
3630 enum vxge_hw_status status = VXGE_HW_OK;
3631 struct __vxge_hw_virtualpath *vpath;
3632
3633 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
3634
3635 if (vpath->ringh) {
3636 status = __vxge_hw_ring_reset(vpath->ringh);
3637 if (status != VXGE_HW_OK)
3638 goto exit;
3639 }
3640
3641 if (vpath->fifoh)
3642 status = __vxge_hw_fifo_reset(vpath->fifoh);
3643exit:
3644 return status;
3645}
3646
3647/*
3648 * __vxge_hw_vpath_prc_configure
3649 * This routine configures the prc registers of virtual path using the config
3650 * passed
3651 */
3652static void
3653__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3654{
3655 u64 val64;
3656 struct __vxge_hw_virtualpath *vpath;
3657 struct vxge_hw_vp_config *vp_config;
3658 struct vxge_hw_vpath_reg __iomem *vp_reg;
3659
3660 vpath = &hldev->virtual_paths[vp_id];
3661 vp_reg = vpath->vp_reg;
3662 vp_config = vpath->vp_config;
3663
3664 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3665 return;
3666
3667 val64 = readq(&vp_reg->prc_cfg1);
3668 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3669 writeq(val64, &vp_reg->prc_cfg1);
3670
3671 val64 = readq(&vpath->vp_reg->prc_cfg6);
3672 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3673 writeq(val64, &vpath->vp_reg->prc_cfg6);
3674
3675 val64 = readq(&vp_reg->prc_cfg7);
3676
3677 if (vpath->vp_config->ring.scatter_mode !=
3678 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3679
3680 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3681
3682 switch (vpath->vp_config->ring.scatter_mode) {
3683 case VXGE_HW_RING_SCATTER_MODE_A:
3684 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3685 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3686 break;
3687 case VXGE_HW_RING_SCATTER_MODE_B:
3688 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3689 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3690 break;
3691 case VXGE_HW_RING_SCATTER_MODE_C:
3692 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3693 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3694 break;
3695 }
3696 }
3697
3698 writeq(val64, &vp_reg->prc_cfg7);
3699
3700 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3701 __vxge_hw_ring_first_block_address_get(
3702 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3703
3704 val64 = readq(&vp_reg->prc_cfg4);
3705 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3706 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3707
3708 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3709 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3710
3711 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3712 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3713 else
3714 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3715
3716 writeq(val64, &vp_reg->prc_cfg4);
3717}
3718
3719/*
3720 * __vxge_hw_vpath_kdfc_configure
3721 * This routine configures the kdfc registers of virtual path using the
3722 * config passed
3723 */
3724static enum vxge_hw_status
3725__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3726{
3727 u64 val64;
3728 u64 vpath_stride;
3729 enum vxge_hw_status status = VXGE_HW_OK;
3730 struct __vxge_hw_virtualpath *vpath;
3731 struct vxge_hw_vpath_reg __iomem *vp_reg;
3732
3733 vpath = &hldev->virtual_paths[vp_id];
3734 vp_reg = vpath->vp_reg;
3735 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3736
3737 if (status != VXGE_HW_OK)
3738 goto exit;
3739
3740 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3741
3742 vpath->max_kdfc_db =
3743 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3744 val64+1)/2;
3745
3746 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3747
3748 vpath->max_nofl_db = vpath->max_kdfc_db;
3749
3750 if (vpath->max_nofl_db <
3751 ((vpath->vp_config->fifo.memblock_size /
3752 (vpath->vp_config->fifo.max_frags *
3753 sizeof(struct vxge_hw_fifo_txd))) *
3754 vpath->vp_config->fifo.fifo_blocks)) {
3755
3756 return VXGE_HW_BADCFG_FIFO_BLOCKS;
3757 }
3758 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3759 (vpath->max_nofl_db*2)-1);
3760 }
3761
3762 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3763
3764 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3765 &vp_reg->kdfc_fifo_trpl_ctrl);
3766
3767 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3768
3769 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3770 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3771
3772 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3773 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3774#ifndef __BIG_ENDIAN
3775 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3776#endif
3777 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3778
3779 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3780 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3781 wmb();
3782 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3783
3784 vpath->nofl_db =
3785 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3786 (hldev->kdfc + (vp_id *
3787 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3788 vpath_stride)));
3789exit:
3790 return status;
3791}
3792
3793/*
3794 * __vxge_hw_vpath_mac_configure
3795 * This routine configures the mac of virtual path using the config passed
3796 */
3797static enum vxge_hw_status
3798__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3799{
3800 u64 val64;
3801 enum vxge_hw_status status = VXGE_HW_OK;
3802 struct __vxge_hw_virtualpath *vpath;
3803 struct vxge_hw_vp_config *vp_config;
3804 struct vxge_hw_vpath_reg __iomem *vp_reg;
3805
3806 vpath = &hldev->virtual_paths[vp_id];
3807 vp_reg = vpath->vp_reg;
3808 vp_config = vpath->vp_config;
3809
3810 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3811 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3812
3813 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3814
3815 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3816
3817 if (vp_config->rpa_strip_vlan_tag !=
3818 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3819 if (vp_config->rpa_strip_vlan_tag)
3820 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3821 else
3822 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3823 }
3824
3825 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3826 val64 = readq(&vp_reg->rxmac_vcfg0);
3827
3828 if (vp_config->mtu !=
3829 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3830 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3831 if ((vp_config->mtu +
3832 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3833 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3834 vp_config->mtu +
3835 VXGE_HW_MAC_HEADER_MAX_SIZE);
3836 else
3837 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3838 vpath->max_mtu);
3839 }
3840
3841 writeq(val64, &vp_reg->rxmac_vcfg0);
3842
3843 val64 = readq(&vp_reg->rxmac_vcfg1);
3844
3845 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3846 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3847
3848 if (hldev->config.rth_it_type ==
3849 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3850 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3851 0x2) |
3852 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3853 }
3854
3855 writeq(val64, &vp_reg->rxmac_vcfg1);
3856 }
3857 return status;
3858}
3859
3860/*
3861 * __vxge_hw_vpath_tim_configure
3862 * This routine configures the tim registers of virtual path using the config
3863 * passed
3864 */
3865static enum vxge_hw_status
3866__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3867{
3868 u64 val64;
3869 enum vxge_hw_status status = VXGE_HW_OK;
3870 struct __vxge_hw_virtualpath *vpath;
3871 struct vxge_hw_vpath_reg __iomem *vp_reg;
3872 struct vxge_hw_vp_config *config;
3873
3874 vpath = &hldev->virtual_paths[vp_id];
3875 vp_reg = vpath->vp_reg;
3876 config = vpath->vp_config;
3877
3878 writeq((u64)0, &vp_reg->tim_dest_addr);
3879 writeq((u64)0, &vp_reg->tim_vpath_map);
3880 writeq((u64)0, &vp_reg->tim_bitmap);
3881 writeq((u64)0, &vp_reg->tim_remap);
3882
3883 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3884 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3885 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3886 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3887
3888 val64 = readq(&vp_reg->tim_pci_cfg);
3889 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3890 writeq(val64, &vp_reg->tim_pci_cfg);
3891
3892 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
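	/*
	 * Transmit-side (TTI) timer configuration: program the btimer value,
	 * TIMER_AC/TIMER_CI flags, utilization ranges and event counts for
	 * the TX interrupt of this vpath from config->tti, skipping any
	 * field left at VXGE_HW_USE_FLASH_DEFAULT.
	 */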
3893
3894 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3895
3896 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3897 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3898 0x3ffffff);
3899 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3900 config->tti.btimer_val);
3901 }
3902
3903 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3904
3905 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3906 if (config->tti.timer_ac_en)
3907 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3908 else
3909 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3910 }
3911
3912 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3913 if (config->tti.timer_ci_en)
3914 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3915 else
3916 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3917 }
3918
3919 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3920 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3921 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3922 config->tti.urange_a);
3923 }
3924
3925 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3926 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3927 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3928 config->tti.urange_b);
3929 }
3930
3931 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3932 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3933 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3934 config->tti.urange_c);
3935 }
3936
3937 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3938 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3939
3940 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3941 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3942 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3943 config->tti.uec_a);
3944 }
3945
3946 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3947 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3948 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3949 config->tti.uec_b);
3950 }
3951
3952 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3953 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3954 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3955 config->tti.uec_c);
3956 }
3957
3958 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3959 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3960 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3961 config->tti.uec_d);
3962 }
3963
3964 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3965 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3966
3967 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3968 if (config->tti.timer_ri_en)
3969 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3970 else
3971 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3972 }
3973
3974 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3975 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3976 0x3ffffff);
3977 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3978 config->tti.rtimer_val);
3979 }
3980
3981 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3982 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3983 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3984 config->tti.util_sel);
3985 }
3986
3987 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3988 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3989 0x3ffffff);
3990 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3991 config->tti.ltimer_val);
3992 }
3993
3994 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3995 }
3996
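	/*
	 * Receive-side (RTI) timer configuration: the same register layout
	 * as above, programmed for the RX interrupt of this vpath from
	 * config->rti.
	 */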
3997 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3998
3999 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4000
4001 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4002 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4003 0x3ffffff);
4004 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4005 config->rti.btimer_val);
4006 }
4007
4008 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4009
4010 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4011 if (config->rti.timer_ac_en)
4012 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4013 else
4014 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4015 }
4016
4017 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4018 if (config->rti.timer_ci_en)
4019 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4020 else
4021 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4022 }
4023
4024 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4025 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4026 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4027 config->rti.urange_a);
4028 }
4029
4030 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4031 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4032 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4033 config->rti.urange_b);
4034 }
4035
4036 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4037 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4038 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4039 config->rti.urange_c);
4040 }
4041
4042 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4043 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4044
4045 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4046 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4047 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4048 config->rti.uec_a);
4049 }
4050
4051 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4052 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4053 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4054 config->rti.uec_b);
4055 }
4056
4057 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4058 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4059 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4060 config->rti.uec_c);
4061 }
4062
4063 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4064 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4065 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4066 config->rti.uec_d);
4067 }
4068
4069 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4070 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4071
4072 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4073 if (config->rti.timer_ri_en)
4074 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4075 else
4076 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4077 }
4078
4079 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4080 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4081 0x3ffffff);
4082 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4083 config->rti.rtimer_val);
4084 }
4085
4086 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4087 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4088 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
4089 config->rti.util_sel);
4090 }
4091
4092 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4093 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4094 0x3ffffff);
4095 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4096 config->rti.ltimer_val);
4097 }
4098
4099 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4100 }
4101
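	/*
	 * Leave the EINTA and BMAP interrupt timer sets cleared for this
	 * vpath.
	 */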
4102 val64 = 0;
4103 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4104 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4105 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4106 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4107 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4108 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4109
4110 return status;
4111}
4112
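/*
 * vxge_hw_vpath_tti_ci_set - Set the TX timer CI bit for a vpath
 * If the vpath's fifo is enabled and TIMER_CI is not already set, this
 * routine sets VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI for the vpath's TX
 * interrupt and records the new setting in the vpath configuration.
 */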
4113void
4114vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4115{
4116 struct __vxge_hw_virtualpath *vpath;
4117 struct vxge_hw_vpath_reg __iomem *vp_reg;
4118 struct vxge_hw_vp_config *config;
4119 u64 val64;
4120
4121 vpath = &hldev->virtual_paths[vp_id];
4122 vp_reg = vpath->vp_reg;
4123 config = vpath->vp_config;
4124
4125 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4126 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4127
4128 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4129 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4130 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4131 writeq(val64,
4132 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4133 }
4134 }
4135}
4136/*
4137 * __vxge_hw_vpath_initialize
4138 * This routine is the final phase of init which initializes the
4139 * registers of the vpath using the configuration passed.
4140 */
4141static enum vxge_hw_status
4142__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4143{
4144 u64 val64;
4145 u32 val32;
4146 enum vxge_hw_status status = VXGE_HW_OK;
4147 struct __vxge_hw_virtualpath *vpath;
4148 struct vxge_hw_vpath_reg __iomem *vp_reg;
4149
4150 vpath = &hldev->virtual_paths[vp_id];
4151
4152 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4153 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4154 goto exit;
4155 }
4156 vp_reg = vpath->vp_reg;
4157
4158 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4159
4160 if (status != VXGE_HW_OK)
4161 goto exit;
4162
4163 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4164
4165 if (status != VXGE_HW_OK)
4166 goto exit;
4167
4168 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4169
4170 if (status != VXGE_HW_OK)
4171 goto exit;
4172
4173 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4174
4175 if (status != VXGE_HW_OK)
4176 goto exit;
4177
4178	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4179
4180 /* Get MRRS value from device control */
4181 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4182
4183 if (status == VXGE_HW_OK) {
4184 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4185 val64 &=
4186 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4187 val64 |=
4188 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4189
4190 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4191 }
4192
4193 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4194 val64 |=
4195 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4196 VXGE_HW_MAX_PAYLOAD_SIZE_512);
4197
4198 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4199 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4200
4201exit:
4202 return status;
4203}
4204
4205/*
4206 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4207 * This routine is the initial phase of init which resets the vpath and
4208 * initializes the software support structures.
4209 */
4210static enum vxge_hw_status
4211__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4212 struct vxge_hw_vp_config *config)
4213{
4214 struct __vxge_hw_virtualpath *vpath;
4215 enum vxge_hw_status status = VXGE_HW_OK;
4216
4217 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4218 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4219 goto exit;
4220 }
4221
4222 vpath = &hldev->virtual_paths[vp_id];
4223
4224	spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4225	vpath->vp_id = vp_id;
4226 vpath->vp_open = VXGE_HW_VP_OPEN;
4227 vpath->hldev = hldev;
4228 vpath->vp_config = config;
4229 vpath->vp_reg = hldev->vpath_reg[vp_id];
4230 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4231
4232 __vxge_hw_vpath_reset(hldev, vp_id);
4233
4234 status = __vxge_hw_vpath_reset_check(vpath);
4235	if (status != VXGE_HW_OK) {
4236 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4237 goto exit;
4238 }
4239
4240 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4241	if (status != VXGE_HW_OK) {
4242 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4243 goto exit;
4244 }
4245
4246 INIT_LIST_HEAD(&vpath->vpath_handles);
4247
4248 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4249
4250 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4251 hldev->tim_int_mask1, vp_id);
4252
4253 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4254	if (status != VXGE_HW_OK)
4255 __vxge_hw_vp_terminate(hldev, vp_id);
4256exit:
4257 return status;
4258}
4259
4260/*
4261 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4262 * This routine closes all channels it opened and freeup memory
4263 */
4264static void
4265__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4266{
4267 struct __vxge_hw_virtualpath *vpath;
4268
4269 vpath = &hldev->virtual_paths[vp_id];
4270
4271 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4272 goto exit;
4273
4274 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4275 vpath->hldev->tim_int_mask1, vpath->vp_id);
4276 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4277
4278 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4279exit:
4280 return;
4281}
4282
4283/*
4284 * vxge_hw_vpath_mtu_set - Set MTU.
4285 * Set a new MTU value. For example, to use jumbo frames:
4286 * vxge_hw_vpath_mtu_set(my_device, 9600);
4287 */
4288enum vxge_hw_status
4289vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4290{
4291 u64 val64;
4292 enum vxge_hw_status status = VXGE_HW_OK;
4293 struct __vxge_hw_virtualpath *vpath;
4294
4295 if (vp == NULL) {
4296 status = VXGE_HW_ERR_INVALID_HANDLE;
4297 goto exit;
4298 }
4299 vpath = vp->vpath;
4300
4301 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4302
4303 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
4304 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
4305
4306 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4307
4308 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4309 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4310
4311 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4312
4313 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4314
4315exit:
4316 return status;
4317}
4318
4319/*
4320 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4321 * This function is used to open access to a virtual path of an
4322 * adapter for offload and GRO operations. It returns
4323 * synchronously.
4324 */
4325enum vxge_hw_status
4326vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4327 struct vxge_hw_vpath_attr *attr,
4328 struct __vxge_hw_vpath_handle **vpath_handle)
4329{
4330 struct __vxge_hw_virtualpath *vpath;
4331 struct __vxge_hw_vpath_handle *vp;
4332 enum vxge_hw_status status;
4333
4334 vpath = &hldev->virtual_paths[attr->vp_id];
4335
4336 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4337 status = VXGE_HW_ERR_INVALID_STATE;
4338 goto vpath_open_exit1;
4339 }
4340
4341 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4342 &hldev->config.vp_config[attr->vp_id]);
4343
4344 if (status != VXGE_HW_OK)
4345 goto vpath_open_exit1;
4346
4347 vp = (struct __vxge_hw_vpath_handle *)
4348 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4349 if (vp == NULL) {
4350 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4351 goto vpath_open_exit2;
4352 }
4353
4354 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4355
4356 vp->vpath = vpath;
4357
4358 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4359 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4360 if (status != VXGE_HW_OK)
4361 goto vpath_open_exit6;
4362 }
4363
4364 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4365 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4366 if (status != VXGE_HW_OK)
4367 goto vpath_open_exit7;
4368
4369 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4370 }
4371
4372 vpath->fifoh->tx_intr_num =
4373 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4374 VXGE_HW_VPATH_INTR_TX;
4375
4376 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4377 VXGE_HW_BLOCK_SIZE);
4378
4379 if (vpath->stats_block == NULL) {
4380 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4381 goto vpath_open_exit8;
4382 }
4383
4384 vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4385 stats_block->memblock;
4386 memset(vpath->hw_stats, 0,
4387 sizeof(struct vxge_hw_vpath_stats_hw_info));
4388
4389 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4390 vpath->hw_stats;
4391
4392 vpath->hw_stats_sav =
4393 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4394 memset(vpath->hw_stats_sav, 0,
4395 sizeof(struct vxge_hw_vpath_stats_hw_info));
4396
4397 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4398
4399 status = vxge_hw_vpath_stats_enable(vp);
4400 if (status != VXGE_HW_OK)
4401 goto vpath_open_exit8;
4402
4403 list_add(&vp->item, &vpath->vpath_handles);
4404
4405 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4406
4407 *vpath_handle = vp;
4408
4409 attr->fifo_attr.userdata = vpath->fifoh;
4410 attr->ring_attr.userdata = vpath->ringh;
4411
4412 return VXGE_HW_OK;
4413
4414vpath_open_exit8:
4415 if (vpath->ringh != NULL)
4416 __vxge_hw_ring_delete(vp);
4417vpath_open_exit7:
4418 if (vpath->fifoh != NULL)
4419 __vxge_hw_fifo_delete(vp);
4420vpath_open_exit6:
4421 vfree(vp);
4422vpath_open_exit2:
4423 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4424vpath_open_exit1:
4425
4426 return status;
4427}
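
/*
 * Usage sketch (illustrative only; "id" and the surrounding setup are
 * hypothetical, and real callers also fill attr.fifo_attr / attr.ring_attr
 * as required by their configuration):
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = id;
 *	if (vxge_hw_vpath_open(hldev, &attr, &vp) == VXGE_HW_OK) {
 *		... use attr.fifo_attr.userdata / attr.ring_attr.userdata ...
 *		vxge_hw_vpath_close(vp);
 *	}
 */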
4428
4429/**
4430 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell for a vpath
4431 * @vp: Handle obtained from a previous vpath open
4432 *
4433 * This function posts the count of RxDs available in RxD memory to the
4434 * PRC doorbell register and recomputes the ring's rxds_limit from the
4435 * RXD_SPAT setting.
4436 */
4437void
4438vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4439{
4440 struct __vxge_hw_virtualpath *vpath = NULL;
4441 u64 new_count, val64, val164;
4442 struct __vxge_hw_ring *ring;
4443
4444 vpath = vp->vpath;
4445 ring = vpath->ringh;
4446
4447 new_count = readq(&vpath->vp_reg->rxdmem_size);
4448 new_count &= 0x1fff;
4449 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4450
4451 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4452 &vpath->vp_reg->prc_rxd_doorbell);
4453 readl(&vpath->vp_reg->prc_rxd_doorbell);
4454
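	/*
	 * Derive a conservative replenish threshold: cap rxds_limit at the
	 * smaller of half the posted quad-word count and the count less the
	 * RXD_SPAT allowance, converted to RxDs (4 qwords each), with a
	 * floor of 4.
	 */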
4455 val164 /= 2;
4456 val64 = readq(&vpath->vp_reg->prc_cfg6);
4457 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4458 val64 &= 0x1ff;
4459
4460 /*
4461 * Each RxD is of 4 qwords
4462 */
4463 new_count -= (val64 + 1);
4464 val64 = min(val164, new_count) / 4;
4465
4466 ring->rxds_limit = min(ring->rxds_limit, val64);
4467 if (ring->rxds_limit < 4)
4468 ring->rxds_limit = 4;
4469}
4470
4471/*
4472 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4473 * This function is used to close access to a virtual path opened
4474 * earlier.
4475 */
4476enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4477{
4478 struct __vxge_hw_virtualpath *vpath = NULL;
4479 struct __vxge_hw_device *devh = NULL;
4480 u32 vp_id = vp->vpath->vp_id;
4481 u32 is_empty = TRUE;
4482 enum vxge_hw_status status = VXGE_HW_OK;
4483
4484 vpath = vp->vpath;
4485 devh = vpath->hldev;
4486
4487 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4488 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4489 goto vpath_close_exit;
4490 }
4491
4492 list_del(&vp->item);
4493
4494 if (!list_empty(&vpath->vpath_handles)) {
4495 list_add(&vp->item, &vpath->vpath_handles);
4496 is_empty = FALSE;
4497 }
4498
4499 if (!is_empty) {
4500 status = VXGE_HW_FAIL;
4501 goto vpath_close_exit;
4502 }
4503
4504 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4505
4506 if (vpath->ringh != NULL)
4507 __vxge_hw_ring_delete(vp);
4508
4509 if (vpath->fifoh != NULL)
4510 __vxge_hw_fifo_delete(vp);
4511
4512 if (vpath->stats_block != NULL)
4513 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4514
4515 vfree(vp);
4516
4517 __vxge_hw_vp_terminate(devh, vp_id);
4518
4519	spin_lock(&vpath->lock);
4520	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4521	spin_unlock(&vpath->lock);
4522
4523vpath_close_exit:
4524 return status;
4525}
4526
4527/*
4528 * vxge_hw_vpath_reset - Resets vpath
4529 * This function is used to request a reset of vpath
4530 */
4531enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4532{
4533 enum vxge_hw_status status;
4534 u32 vp_id;
4535 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4536
4537 vp_id = vpath->vp_id;
4538
4539 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4540 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4541 goto exit;
4542 }
4543
4544 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4545 if (status == VXGE_HW_OK)
4546 vpath->sw_stats->soft_reset_cnt++;
4547exit:
4548 return status;
4549}
4550
4551/*
4552 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4553 * This function polls for the vpath reset completion and re-initializes
4554 * the vpath.
4555 */
4556enum vxge_hw_status
4557vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4558{
4559 struct __vxge_hw_virtualpath *vpath = NULL;
4560 enum vxge_hw_status status;
4561 struct __vxge_hw_device *hldev;
4562 u32 vp_id;
4563
4564 vp_id = vp->vpath->vp_id;
4565 vpath = vp->vpath;
4566 hldev = vpath->hldev;
4567
4568 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4569 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4570 goto exit;
4571 }
4572
4573 status = __vxge_hw_vpath_reset_check(vpath);
4574 if (status != VXGE_HW_OK)
4575 goto exit;
4576
4577 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4578 if (status != VXGE_HW_OK)
4579 goto exit;
4580
4581 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4582 if (status != VXGE_HW_OK)
4583 goto exit;
4584
4585 if (vpath->ringh != NULL)
4586 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4587
4588 memset(vpath->hw_stats, 0,
4589 sizeof(struct vxge_hw_vpath_stats_hw_info));
4590
4591 memset(vpath->hw_stats_sav, 0,
4592 sizeof(struct vxge_hw_vpath_stats_hw_info));
4593
4594 writeq(vpath->stats_block->dma_addr,
4595 &vpath->vp_reg->stats_cfg);
4596
4597 status = vxge_hw_vpath_stats_enable(vp);
4598
4599exit:
4600 return status;
4601}
4602
4603/*
4604 * vxge_hw_vpath_enable - Enable vpath.
4605 * This routine clears the vpath reset thereby enabling a vpath
4606 * to start forwarding frames and generating interrupts.
4607 */
4608void
4609vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4610{
4611 struct __vxge_hw_device *hldev;
4612 u64 val64;
4613
4614 hldev = vp->vpath->hldev;
4615
4616 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4617 1 << (16 - vp->vpath->vp_id));
4618
4619 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4620 &hldev->common_reg->cmn_rsthdlr_cfg1);
4621}
4622
4623/*
4624 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4625 * Enable the DMA vpath statistics. The function is to be called to re-enable
4626 * the adapter to update stats into host memory.
4627 */
4628static enum vxge_hw_status
4629vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4630{
4631 enum vxge_hw_status status = VXGE_HW_OK;
4632 struct __vxge_hw_virtualpath *vpath;
4633
4634 vpath = vp->vpath;
4635
4636 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4637 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4638 goto exit;
4639 }
4640
4641 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4642 sizeof(struct vxge_hw_vpath_stats_hw_info));
4643
4644 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4645exit:
4646 return status;
4647}
4648
4649/*
4650 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4651 * and offset and perform an operation
4652 */
4653static enum vxge_hw_status
4654__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4655 u32 operation, u32 offset, u64 *stat)
4656{
4657 u64 val64;
4658 enum vxge_hw_status status = VXGE_HW_OK;
4659 struct vxge_hw_vpath_reg __iomem *vp_reg;
4660
4661 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4662 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4663 goto vpath_stats_access_exit;
4664 }
4665
4666 vp_reg = vpath->vp_reg;
4667
4668 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4669 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4670 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4671
4672 status = __vxge_hw_pio_mem_write64(val64,
4673 &vp_reg->xmac_stats_access_cmd,
4674 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4675 vpath->hldev->config.device_poll_millis);
4676
4677 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4678 *stat = readq(&vp_reg->xmac_stats_access_data);
4679 else
4680 *stat = 0;
4681
4682vpath_stats_access_exit:
4683 return status;
4684}
4685
4686/*
4687 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4688 */
4689static enum vxge_hw_status
4690__vxge_hw_vpath_xmac_tx_stats_get(
4691 struct __vxge_hw_virtualpath *vpath,
4692 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4693{
4694 u64 *val64;
4695 int i;
4696 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4697 enum vxge_hw_status status = VXGE_HW_OK;
4698
4699 val64 = (u64 *) vpath_tx_stats;
4700
4701 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4702 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4703 goto exit;
4704 }
4705
4706 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4707 status = __vxge_hw_vpath_stats_access(vpath,
4708 VXGE_HW_STATS_OP_READ,
4709 offset, val64);
4710 if (status != VXGE_HW_OK)
4711 goto exit;
4712 offset++;
4713 val64++;
4714 }
4715exit:
4716 return status;
4717}
4718
4719/*
4720 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4721 */
4722static enum vxge_hw_status
4723__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4724			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4725{
4726 u64 *val64;
4727 enum vxge_hw_status status = VXGE_HW_OK;
4728 int i;
4729 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4730 val64 = (u64 *) vpath_rx_stats;
4731
4732 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4733 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4734 goto exit;
4735 }
4736 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4737 status = __vxge_hw_vpath_stats_access(vpath,
4738 VXGE_HW_STATS_OP_READ,
4739 offset >> 3, val64);
4740 if (status != VXGE_HW_OK)
4741 goto exit;
4742
4743 offset += 8;
4744 val64++;
4745 }
4746exit:
4747 return status;
4748}
4749
4750/*
4751 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4752 */
4753static enum vxge_hw_status
4754__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4755 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4756{
4757 u64 val64;
4758 enum vxge_hw_status status = VXGE_HW_OK;
4759 struct vxge_hw_vpath_reg __iomem *vp_reg;
4760
4761 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4762 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4763 goto exit;
4764 }
4765 vp_reg = vpath->vp_reg;
4766
4767 val64 = readq(&vp_reg->vpath_debug_stats0);
4768 hw_stats->ini_num_mwr_sent =
4769 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4770
4771 val64 = readq(&vp_reg->vpath_debug_stats1);
4772 hw_stats->ini_num_mrd_sent =
4773 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4774
4775 val64 = readq(&vp_reg->vpath_debug_stats2);
4776 hw_stats->ini_num_cpl_rcvd =
4777 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4778
4779 val64 = readq(&vp_reg->vpath_debug_stats3);
4780 hw_stats->ini_num_mwr_byte_sent =
4781 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4782
4783 val64 = readq(&vp_reg->vpath_debug_stats4);
4784 hw_stats->ini_num_cpl_byte_rcvd =
4785 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4786
4787 val64 = readq(&vp_reg->vpath_debug_stats5);
4788 hw_stats->wrcrdtarb_xoff =
4789 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4790
4791 val64 = readq(&vp_reg->vpath_debug_stats6);
4792 hw_stats->rdcrdtarb_xoff =
4793 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4794
4795 val64 = readq(&vp_reg->vpath_genstats_count01);
4796 hw_stats->vpath_genstats_count0 =
4797 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4798 val64);
4799
4800 val64 = readq(&vp_reg->vpath_genstats_count01);
4801 hw_stats->vpath_genstats_count1 =
4802 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4803 val64);
4804
4805 val64 = readq(&vp_reg->vpath_genstats_count23);
4806 hw_stats->vpath_genstats_count2 =
4807 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4808 val64);
4809
4810	val64 = readq(&vp_reg->vpath_genstats_count23);
4811 hw_stats->vpath_genstats_count3 =
4812 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4813 val64);
4814
4815 val64 = readq(&vp_reg->vpath_genstats_count4);
4816 hw_stats->vpath_genstats_count4 =
4817 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4818 val64);
4819
4820 val64 = readq(&vp_reg->vpath_genstats_count5);
4821 hw_stats->vpath_genstats_count5 =
4822 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4823 val64);
4824
4825 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4826 if (status != VXGE_HW_OK)
4827 goto exit;
4828
4829 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4830 if (status != VXGE_HW_OK)
4831 goto exit;
4832
4833 VXGE_HW_VPATH_STATS_PIO_READ(
4834 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4835
4836 hw_stats->prog_event_vnum0 =
4837 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4838
4839 hw_stats->prog_event_vnum1 =
4840 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4841
4842 VXGE_HW_VPATH_STATS_PIO_READ(
4843 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4844
4845 hw_stats->prog_event_vnum2 =
4846 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4847
4848 hw_stats->prog_event_vnum3 =
4849 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4850
4851 val64 = readq(&vp_reg->rx_multi_cast_stats);
4852 hw_stats->rx_multi_cast_frame_discard =
4853 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4854
4855 val64 = readq(&vp_reg->rx_frm_transferred);
4856 hw_stats->rx_frm_transferred =
4857 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4858
4859 val64 = readq(&vp_reg->rxd_returned);
4860 hw_stats->rxd_returned =
4861 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4862
4863 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4864 hw_stats->rx_mpa_len_fail_frms =
4865 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4866 hw_stats->rx_mpa_mrk_fail_frms =
4867 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4868 hw_stats->rx_mpa_crc_fail_frms =
4869 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4870
4871 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4872 hw_stats->rx_permitted_frms =
4873 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4874 hw_stats->rx_vp_reset_discarded_frms =
4875 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4876 hw_stats->rx_wol_frms =
4877 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4878
4879 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4880 hw_stats->tx_vp_reset_discarded_frms =
4881 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4882 val64);
4883exit:
4884 return status;
4885}
4886
4887
4888static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4889 unsigned long size)
4890{
4891 gfp_t flags;
4892 void *vaddr;
4893
4894 if (in_interrupt())
4895 flags = GFP_ATOMIC | GFP_DMA;
4896 else
4897 flags = GFP_KERNEL | GFP_DMA;
4898
4899 vaddr = kmalloc((size), flags);
4900
4901 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4902}
4903
4904static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4905 struct pci_dev **p_dma_acch)
4906{
4907 unsigned long misaligned = *(unsigned long *)p_dma_acch;
4908 u8 *tmp = (u8 *)vaddr;
4909 tmp -= misaligned;
4910 kfree((void *)tmp);
4911}
4912
4913/*
4914 * __vxge_hw_blockpool_create - Create block pool
4915 */
4916
4917enum vxge_hw_status
4918__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4919 struct __vxge_hw_blockpool *blockpool,
4920 u32 pool_size,
4921 u32 pool_max)
4922{
4923 u32 i;
4924 struct __vxge_hw_blockpool_entry *entry = NULL;
4925 void *memblock;
4926 dma_addr_t dma_addr;
4927 struct pci_dev *dma_handle;
4928 struct pci_dev *acc_handle;
4929 enum vxge_hw_status status = VXGE_HW_OK;
4930
4931 if (blockpool == NULL) {
4932 status = VXGE_HW_FAIL;
4933 goto blockpool_create_exit;
4934 }
4935
4936 blockpool->hldev = hldev;
4937 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4938 blockpool->pool_size = 0;
4939 blockpool->pool_max = pool_max;
4940 blockpool->req_out = 0;
4941
4942 INIT_LIST_HEAD(&blockpool->free_block_list);
4943 INIT_LIST_HEAD(&blockpool->free_entry_list);
4944
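	/*
	 * free_entry_list caches spare bookkeeping entries; free_block_list
	 * holds DMA-mapped VXGE_HW_BLOCK_SIZE blocks ready to be handed out.
	 * Pre-allocate pool_size + pool_max entries, then map pool_size
	 * blocks and move the corresponding entries onto the block list.
	 */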
4945 for (i = 0; i < pool_size + pool_max; i++) {
4946 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4947 GFP_KERNEL);
4948 if (entry == NULL) {
4949 __vxge_hw_blockpool_destroy(blockpool);
4950 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4951 goto blockpool_create_exit;
4952 }
4953 list_add(&entry->item, &blockpool->free_entry_list);
4954 }
4955
4956 for (i = 0; i < pool_size; i++) {
4957
4958 memblock = vxge_os_dma_malloc(
4959 hldev->pdev,
4960 VXGE_HW_BLOCK_SIZE,
4961 &dma_handle,
4962 &acc_handle);
4963
4964 if (memblock == NULL) {
4965 __vxge_hw_blockpool_destroy(blockpool);
4966 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4967 goto blockpool_create_exit;
4968 }
4969
4970 dma_addr = pci_map_single(hldev->pdev, memblock,
4971 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4972
4973 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4974 dma_addr))) {
4975
4976 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4977 __vxge_hw_blockpool_destroy(blockpool);
4978 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4979 goto blockpool_create_exit;
4980 }
4981
4982 if (!list_empty(&blockpool->free_entry_list))
4983 entry = (struct __vxge_hw_blockpool_entry *)
4984 list_first_entry(&blockpool->free_entry_list,
4985 struct __vxge_hw_blockpool_entry,
4986 item);
4987
4988 if (entry == NULL)
4989 entry =
4990 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4991 GFP_KERNEL);
4992 if (entry != NULL) {
4993 list_del(&entry->item);
4994 entry->length = VXGE_HW_BLOCK_SIZE;
4995 entry->memblock = memblock;
4996 entry->dma_addr = dma_addr;
4997 entry->acc_handle = acc_handle;
4998 entry->dma_handle = dma_handle;
4999 list_add(&entry->item,
5000 &blockpool->free_block_list);
5001 blockpool->pool_size++;
5002 } else {
5003 __vxge_hw_blockpool_destroy(blockpool);
5004 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5005 goto blockpool_create_exit;
5006 }
5007 }
5008
5009blockpool_create_exit:
5010 return status;
5011}
5012
5013/*
5014 * __vxge_hw_blockpool_destroy - Deallocates the block pool
5015 */
5016
5017void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
5018{
5019
5020 struct __vxge_hw_device *hldev;
5021 struct list_head *p, *n;
5022 u16 ret;
5023
5024 if (blockpool == NULL) {
5025 ret = 1;
5026 goto exit;
5027 }
5028
5029 hldev = blockpool->hldev;
5030
5031 list_for_each_safe(p, n, &blockpool->free_block_list) {
5032
5033 pci_unmap_single(hldev->pdev,
5034 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5035 ((struct __vxge_hw_blockpool_entry *)p)->length,
5036 PCI_DMA_BIDIRECTIONAL);
5037
5038 vxge_os_dma_free(hldev->pdev,
5039 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5040 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
5041
5042 list_del(
5043 &((struct __vxge_hw_blockpool_entry *)p)->item);
5044 kfree(p);
5045 blockpool->pool_size--;
5046 }
5047
5048 list_for_each_safe(p, n, &blockpool->free_entry_list) {
5049 list_del(
5050 &((struct __vxge_hw_blockpool_entry *)p)->item);
5051 kfree((void *)p);
5052 }
5053 ret = 0;
5054exit:
5055 return;
5056}
5057
5058/*
5059 * __vxge_hw_blockpool_blocks_add - Request additional blocks
5060 */
5061static
5062void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
5063{
5064 u32 nreq = 0, i;
5065
5066 if ((blockpool->pool_size + blockpool->req_out) <
5067 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
5068 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
5069 blockpool->req_out += nreq;
5070 }
5071
5072 for (i = 0; i < nreq; i++)
5073 vxge_os_dma_malloc_async(
5074 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5075 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
5076}
5077
5078/*
5079 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
5080 */
5081static
5082void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
5083{
5084 struct list_head *p, *n;
5085
5086 list_for_each_safe(p, n, &blockpool->free_block_list) {
5087
5088 if (blockpool->pool_size < blockpool->pool_max)
5089 break;
5090
5091 pci_unmap_single(
5092 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5093 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5094 ((struct __vxge_hw_blockpool_entry *)p)->length,
5095 PCI_DMA_BIDIRECTIONAL);
5096
5097 vxge_os_dma_free(
5098 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5099 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5100 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
5101
5102 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5103
5104 list_add(p, &blockpool->free_entry_list);
5105
5106 blockpool->pool_size--;
5107
5108 }
5109}
5110
5111/*
5112 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5113 * Adds a block to block pool
5114 */
5115static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5116 void *block_addr,
5117 u32 length,
5118 struct pci_dev *dma_h,
5119 struct pci_dev *acc_handle)
5120{
5121 struct __vxge_hw_blockpool *blockpool;
5122 struct __vxge_hw_blockpool_entry *entry = NULL;
5123 dma_addr_t dma_addr;
5124 enum vxge_hw_status status = VXGE_HW_OK;
5125 u32 req_out;
5126
5127 blockpool = &devh->block_pool;
5128
5129 if (block_addr == NULL) {
5130 blockpool->req_out--;
5131 status = VXGE_HW_FAIL;
5132 goto exit;
5133 }
5134
5135 dma_addr = pci_map_single(devh->pdev, block_addr, length,
5136 PCI_DMA_BIDIRECTIONAL);
5137
5138 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5139
5140 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5141 blockpool->req_out--;
5142 status = VXGE_HW_FAIL;
5143 goto exit;
5144 }
5145
5146
5147 if (!list_empty(&blockpool->free_entry_list))
5148 entry = (struct __vxge_hw_blockpool_entry *)
5149 list_first_entry(&blockpool->free_entry_list,
5150 struct __vxge_hw_blockpool_entry,
5151 item);
5152
5153 if (entry == NULL)
5154 entry = (struct __vxge_hw_blockpool_entry *)
5155 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5156 else
5157 list_del(&entry->item);
5158
5159 if (entry != NULL) {
5160 entry->length = length;
5161 entry->memblock = block_addr;
5162 entry->dma_addr = dma_addr;
5163 entry->acc_handle = acc_handle;
5164 entry->dma_handle = dma_h;
5165 list_add(&entry->item, &blockpool->free_block_list);
5166 blockpool->pool_size++;
5167 status = VXGE_HW_OK;
5168 } else
5169 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5170
5171 blockpool->req_out--;
5172
5173 req_out = blockpool->req_out;
5174exit:
5175 return;
5176}
5177
5178/*
5179 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5180 * Allocates a block of memory of given size, either from block pool
5181 * or by calling vxge_os_dma_malloc()
5182 */
5183void *
5184__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5185 struct vxge_hw_mempool_dma *dma_object)
5186{
5187 struct __vxge_hw_blockpool_entry *entry = NULL;
5188 struct __vxge_hw_blockpool *blockpool;
5189 void *memblock = NULL;
5190 enum vxge_hw_status status = VXGE_HW_OK;
5191
5192 blockpool = &devh->block_pool;
5193
5194 if (size != blockpool->block_size) {
5195
5196 memblock = vxge_os_dma_malloc(devh->pdev, size,
5197 &dma_object->handle,
5198 &dma_object->acc_handle);
5199
5200 if (memblock == NULL) {
5201 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5202 goto exit;
5203 }
5204
5205 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5206 PCI_DMA_BIDIRECTIONAL);
5207
5208 if (unlikely(pci_dma_mapping_error(devh->pdev,
5209 dma_object->addr))) {
5210 vxge_os_dma_free(devh->pdev, memblock,
5211 &dma_object->acc_handle);
5212 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5213 goto exit;
5214 }
5215
5216 } else {
5217
5218 if (!list_empty(&blockpool->free_block_list))
5219 entry = (struct __vxge_hw_blockpool_entry *)
5220 list_first_entry(&blockpool->free_block_list,
5221 struct __vxge_hw_blockpool_entry,
5222 item);
5223
5224 if (entry != NULL) {
5225 list_del(&entry->item);
5226 dma_object->addr = entry->dma_addr;
5227 dma_object->handle = entry->dma_handle;
5228 dma_object->acc_handle = entry->acc_handle;
5229 memblock = entry->memblock;
5230
5231 list_add(&entry->item,
5232 &blockpool->free_entry_list);
5233 blockpool->pool_size--;
5234 }
5235
5236 if (memblock != NULL)
5237 __vxge_hw_blockpool_blocks_add(blockpool);
5238 }
5239exit:
5240 return memblock;
5241}
5242
5243/*
5244 * __vxge_hw_blockpool_free - Frees the memory allocated with
5245 * __vxge_hw_blockpool_malloc
5246 */
5247void
5248__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5249 void *memblock, u32 size,
5250 struct vxge_hw_mempool_dma *dma_object)
5251{
5252 struct __vxge_hw_blockpool_entry *entry = NULL;
5253 struct __vxge_hw_blockpool *blockpool;
5254 enum vxge_hw_status status = VXGE_HW_OK;
5255
5256 blockpool = &devh->block_pool;
5257
5258 if (size != blockpool->block_size) {
5259 pci_unmap_single(devh->pdev, dma_object->addr, size,
5260 PCI_DMA_BIDIRECTIONAL);
5261 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5262 } else {
5263
5264 if (!list_empty(&blockpool->free_entry_list))
5265 entry = (struct __vxge_hw_blockpool_entry *)
5266 list_first_entry(&blockpool->free_entry_list,
5267 struct __vxge_hw_blockpool_entry,
5268 item);
5269
5270 if (entry == NULL)
5271 entry = (struct __vxge_hw_blockpool_entry *)
5272 vmalloc(sizeof(
5273 struct __vxge_hw_blockpool_entry));
5274 else
5275 list_del(&entry->item);
5276
5277 if (entry != NULL) {
5278 entry->length = size;
5279 entry->memblock = memblock;
5280 entry->dma_addr = dma_object->addr;
5281 entry->acc_handle = dma_object->acc_handle;
5282 entry->dma_handle = dma_object->handle;
5283 list_add(&entry->item,
5284 &blockpool->free_block_list);
5285 blockpool->pool_size++;
5286 status = VXGE_HW_OK;
5287 } else
5288 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5289
5290 if (status == VXGE_HW_OK)
5291 __vxge_hw_blockpool_blocks_remove(blockpool);
5292 }
5293}
5294
5295/*
5296 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5297 * This function allocates a block from block pool or from the system
5298 */
5299struct __vxge_hw_blockpool_entry *
5300__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5301{
5302 struct __vxge_hw_blockpool_entry *entry = NULL;
5303 struct __vxge_hw_blockpool *blockpool;
5304
5305 blockpool = &devh->block_pool;
5306
5307 if (size == blockpool->block_size) {
5308
5309 if (!list_empty(&blockpool->free_block_list))
5310 entry = (struct __vxge_hw_blockpool_entry *)
5311 list_first_entry(&blockpool->free_block_list,
5312 struct __vxge_hw_blockpool_entry,
5313 item);
5314
5315 if (entry != NULL) {
5316 list_del(&entry->item);
5317 blockpool->pool_size--;
5318 }
5319 }
5320
5321 if (entry != NULL)
5322 __vxge_hw_blockpool_blocks_add(blockpool);
5323
5324 return entry;
5325}
5326
5327/*
5328 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5329 * @devh: Hal device
5330 * @entry: Entry of block to be freed
5331 *
5332 * This function frees a block from block pool
5333 */
5334void
5335__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5336 struct __vxge_hw_blockpool_entry *entry)
5337{
5338 struct __vxge_hw_blockpool *blockpool;
5339
5340 blockpool = &devh->block_pool;
5341
5342 if (entry->length == blockpool->block_size) {
5343 list_add(&entry->item, &blockpool->free_block_list);
5344 blockpool->pool_size++;
5345 }
5346
5347 __vxge_hw_blockpool_blocks_remove(blockpool);
5348}