/*
 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * Changelog:
 * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
 * - Rearrange code and style fixes
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
#include <linux/mii.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "ucc_geth_phy.h"

#undef DEBUG

#define DRV_DESC "QE UCC Gigabit Ethernet Controller version: Sept 11, 2006"
#define DRV_NAME "ucc_geth"

#define ugeth_printk(level, format, arg...) \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...) \
	ugeth_printk(KERN_DEBUG, format, ## arg)
#define ugeth_err(format, arg...) \
	ugeth_printk(KERN_ERR, format, ## arg)
#define ugeth_info(format, arg...) \
	ugeth_printk(KERN_INFO, format, ## arg)
#define ugeth_warn(format, arg...) \
	ugeth_printk(KERN_WARNING, format, ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif /* UGETH_VERBOSE_DEBUG */

static DEFINE_SPINLOCK(ugeth_lock);

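/*
 * Default (template) configuration for one UCC Gigabit Ethernet port.
 * The per-port descriptors in ugeth_info[] below are presumably
 * initialized from this template at probe time and then adjusted.
 * Rx/Tx FIFO init values are selected at build time for gigabit vs.
 * 10/100 operation; see the FIXME inside the initializer.
 */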
static ucc_geth_info_t ugeth_primary_info = {
	.uf_info = {
		.bd_mem_part = MEM_PART_SYSTEM,
		.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		.max_rx_buf_length = 1536,
		/* FIXME: should be changed at run time for 1G and 100M */
#ifdef CONFIG_UGETH_HAS_GIGA
		.urfs = UCC_GETH_URFS_GIGA_INIT,
		.urfet = UCC_GETH_URFET_GIGA_INIT,
		.urfset = UCC_GETH_URFSET_GIGA_INIT,
		.utfs = UCC_GETH_UTFS_GIGA_INIT,
		.utfet = UCC_GETH_UTFET_GIGA_INIT,
		.utftt = UCC_GETH_UTFTT_GIGA_INIT,
#else
		.urfs = UCC_GETH_URFS_INIT,
		.urfet = UCC_GETH_URFET_INIT,
		.urfset = UCC_GETH_URFSET_INIT,
		.utfs = UCC_GETH_UTFS_INIT,
		.utfet = UCC_GETH_UTFET_INIT,
		.utftt = UCC_GETH_UTFTT_INIT,
#endif
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /* 1536 */,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN},
	.bdRingLenRx = {
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static ucc_geth_info_t ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Align = (size >> 4) << 4;
	int size4Align = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Align; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

#ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
#endif /* CONFIG_UGETH_FILTERING */

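/*
 * Pop the first node off @lh under ugeth_lock, or return NULL if the
 * list is empty.  Counterpart of enqueue() above.
 */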
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}

static int get_interface_details(enet_interface_e enet_interface,
				 enet_speed_e *speed,
				 int *r10m,
				 int *rmm,
				 int *rpm,
				 int *tbi, int *limited_to_full_duplex)
{
	/* Analyze enet_interface according to Interface Mode
	   Configuration table */
	switch (enet_interface) {
	case ENET_10_MII:
		*speed = ENET_SPEED_10BT;
		break;
	case ENET_10_RMII:
		*speed = ENET_SPEED_10BT;
		*r10m = 1;
		*rmm = 1;
		break;
	case ENET_10_RGMII:
		*speed = ENET_SPEED_10BT;
		*rpm = 1;
		*r10m = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_100_MII:
		*speed = ENET_SPEED_100BT;
		break;
	case ENET_100_RMII:
		*speed = ENET_SPEED_100BT;
		*rmm = 1;
		break;
	case ENET_100_RGMII:
		*speed = ENET_SPEED_100BT;
		*rpm = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_GMII:
		*speed = ENET_SPEED_1000BT;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_RGMII:
		*speed = ENET_SPEED_1000BT;
		*rpm = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_TBI:
		*speed = ENET_SPEED_1000BT;
		*tbi = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_RTBI:
		*speed = ENET_SPEED_1000BT;
		*rpm = 1;
		*tbi = 1;
		*limited_to_full_duplex = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

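/*
 * Allocate an Rx skb over-sized by UCC_GETH_RX_DATA_BUF_ALIGNMENT so
 * that skb->data can be aligned to that boundary, DMA-map the buffer
 * and attach it to @bd, setting the Empty and Interrupt bits while
 * preserving the ring-wrap bit (R_W).
 */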
static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);

	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->dev;

	BD_BUFFER_SET(bd,
		      dma_map_single(NULL,
				     skb->data,
				     ugeth->ug_info->uf_info.max_rx_buf_length +
				     UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				     DMA_FROM_DEVICE));

	BD_STATUS_AND_LENGTH_SET(bd,
				 (R_E | R_I |
				  (BD_STATUS_AND_LENGTH(bd) & R_W)));

	return skb;
}

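/*
 * Attach a fresh skb to every BD in the Rx ring of queue @rxQ.  The
 * loop is terminated by the wrap bit (R_W) of the last BD rather than
 * by the configured ring length.
 */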
static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
{
	u8 *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = BD_STATUS_AND_LENGTH(bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If we cannot allocate a data buffer,
				   abort.  Cleanup will happen elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += UCC_GETH_SIZE_OF_BD;
		i++;
	} while (!(bd_status & R_W));

	return 0;
}

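/*
 * Fill @num_entries init-enet entries at @p_start.  Each 32-bit entry
 * packs a QE serial number (SNUM), the MURAM offset of a thread
 * parameter page allocated here, and the RISC allocation bits; when
 * @skip_page_for_first_entry is set the first (Rx) entry gets no page.
 */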
static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
				  volatile u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  qe_risc_allocation_e risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_MURAM_ERR(init_enet_offset)) {
				ugeth_err
				    ("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}

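/*
 * Inverse of fill_init_enet_entries(): for every entry whose RISC
 * bits match @risc, give back its SNUM and free its MURAM page
 * (except for the page-less first Rx entry).
 */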
static int return_init_enet_entries(ucc_geth_private_t *ugeth,
				    volatile u32 *p_start,
				    u8 num_entries,
				    qe_risc_allocation_e risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		/* Check that this entry was actually valid --
		   needed in case allocation failed */
		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*(p_start++) = 0;	/* Just for cosmetics */
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
				  volatile u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  qe_risc_allocation_e risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		/* Check that this entry was actually valid --
		   needed in case allocation failed */
		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_UGETH_FILTERING
static enet_addr_container_t *get_enet_addr_container(void)
{
	enet_addr_container_t *enet_addr_cont;

	/* allocate memory */
	enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
	if (!enet_addr_cont) {
		ugeth_err("%s: No memory for enet_addr_container_t object.",
			  __FUNCTION__);
		return NULL;
	}

	return enet_addr_cont;
}
#endif /* CONFIG_UGETH_FILTERING */

static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
{
	kfree(enet_addr_cont);
}

#ifdef CONFIG_UGETH_FILTERING
static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
				enet_addr_t *p_enet_addr, u8 paddr_num)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Ethernet frames are defined in Little Endian mode, */
	/* therefore to insert the address we reverse the bytes. */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
			(u16) (*p_enet_addr)[4]));
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
			(u16) (*p_enet_addr)[2]));
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
			(u16) (*p_enet_addr)[0]));

	return 0;
}
#endif /* CONFIG_UGETH_FILTERING */

static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

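/*
 * Write the byte-reversed address into the temporary address register
 * (taddr) and issue the QE_SET_GROUP_ADDRESS command so the QE
 * microcode folds it into the group hash filter.
 */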
static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
				enet_addr_t *p_enet_addr)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode, therefore to
	   insert the address into the hash (Big Endian mode), we reverse
	   the bytes. */
	out_be16(&p_82xx_addr_filt->taddr.h,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
			(u16) (*p_enet_addr)[4]));
	out_be16(&p_82xx_addr_filt->taddr.m,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
			(u16) (*p_enet_addr)[2]));
	out_be16(&p_82xx_addr_filt->taddr.l,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
			(u16) (*p_enet_addr)[0]));

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     (u8) QE_CR_PROTOCOL_ETHERNET, 0);
}

#ifdef CONFIG_UGETH_MAGIC_PACKET
static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	ucc_geth_t *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Enable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm |= UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Enable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 |= MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}

static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	ucc_geth_t *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Disable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm &= ~UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Disable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}
#endif /* CONFIG_UGETH_MAGIC_PACKET */

static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}

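/*
 * Snapshot Tx/Rx firmware statistics from parameter RAM and the
 * hardware MIB counters.  Each block is copied only if the caller
 * supplied a destination buffer and the corresponding gathering mode
 * is active.
 */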
#ifdef DEBUG
static void get_statistics(ucc_geth_private_t *ugeth,
			   ucc_geth_tx_firmware_statistics_t *
			   tx_firmware_statistics,
			   ucc_geth_rx_firmware_statistics_t *
			   rx_firmware_statistics,
			   ucc_geth_hardware_statistics_t *hardware_statistics)
{
	ucc_fast_t *uf_regs;
	ucc_geth_t *ug_regs;
	ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
	ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (ucc_fast_t *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

static void dump_bds(ucc_geth_private_t *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     UCC_GETH_SIZE_OF_BD);
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     UCC_GETH_SIZE_OF_BD);
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(ucc_geth_private_t *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimcfg,
		   in_be32(&ugeth->ug_regs->miimng.miimcfg));
	ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimcom,
		   in_be32(&ugeth->ug_regs->miimng.miimcom));
	ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimadd,
		   in_be32(&ugeth->ug_regs->miimng.miimadd));
	ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimcon,
		   in_be32(&ugeth->ug_regs->miimng.miimcon));
	ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimstat,
		   in_be32(&ugeth->ug_regs->miimng.miimstat));
	ugeth_info("miimind : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimind,
		   in_be32(&ugeth->ug_regs->miimng.miimind));
	ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
		   (u32) &ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
		   (u32) &ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));

	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) &ugeth->p_thread_data_tx[i],
				 sizeof(ucc_geth_thread_data_tx_t));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) &ugeth->p_thread_data_rx[i],
				 sizeof(ucc_geth_thread_data_rx_t));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		for (i = 0; i < 8; i++)
			ugeth_info
			    ("iphoffset[%d] : addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) &ugeth->p_tx_glbl_pram->iphoffset[i],
			     ugeth->p_tx_glbl_pram->iphoffset[i]);
		for (i = 0; i < 8; i++)
			ugeth_info
			    ("vtagtable[%d] : addr - 0x%08x, val - 0x%08x",
			     i,
			     (u32) &ugeth->p_tx_glbl_pram->vtagtable[i],
			     in_be32(&ugeth->p_tx_glbl_pram->vtagtable[i]));
		ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
			   (u32) &ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
			   (u32) &ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		for (i = 0; i < 8; i++)
			ugeth_info
			    ("l3qt[%d] : addr - 0x%08x, val - 0x%08x",
			     i,
			     (u32) &ugeth->p_rx_glbl_pram->l3qt[i],
			     in_be32(&ugeth->p_rx_glbl_pram->l3qt[i]));
		ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) &ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) &ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(ucc_geth_send_queue_qd_t));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_rx_irq_coalescing_tbl->
				   coalescingentry[i]);
			ugeth_info
			    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
			    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32)
				   qe_muram_addr(in_be32
						 (&ugeth->p_rx_bd_qs_tbl[i].
						  bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(ucc_geth_rx_prefetched_bds_t));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(ucc_geth_thread_rx_pram_t);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(ucc_geth_thread_tx_pram_t),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */

static void init_default_reg_vals(volatile u32 *upsmr_register,
				  volatile u32 *maccfg1_register,
				  volatile u32 *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}

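/*
 * Validate the half-duplex parameters against their field maxima and
 * pack them into a single HAFDUP register image.
 */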
static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   volatile u32 *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}

static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       volatile u32 *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /* (min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}

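/*
 * Program the pause quanta into UEMPR, enable the selected automatic
 * flow-control mode in UPSMR, and switch Rx/Tx pause handling on in
 * MACCFG1.
 */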
static int init_flow_control_params(u32 automatic_flow_control_mode,
				    int rx_flow_control_enable,
				    int tx_flow_control_enable,
				    u16 pause_period,
				    u16 extension_field,
				    volatile u32 *upsmr_register,
				    volatile u32 *uempr_register,
				    volatile u32 *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	value = in_be32(upsmr_register);
	value |= automatic_flow_control_mode;
	out_be32(upsmr_register, value);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}

static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     volatile u32 *upsmr_register,
					     volatile u16 *uescr_register)
{
	u32 upsmr_value = 0;
	u16 uescr_value = 0;
	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics) {
		upsmr_value = in_be32(upsmr_register);
		upsmr_value |= UPSMR_HSE;
		out_be32(upsmr_register, upsmr_value);
	}

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}

static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		volatile u32 *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		volatile u32 *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		volatile u16 *temoder_register,
		volatile u32 *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */
	u16 temoder_value;
	u32 remoder_value;

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		temoder_value = in_be16(temoder_register);
		temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
		out_be16(temoder_register, temoder_value);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		remoder_value = in_be32(remoder_register);
		remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
		out_be32(remoder_register, remoder_value);
	}

	return 0;
}

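/*
 * Worked example for the register layout documented below (values
 * computed by hand, not taken from a manual): for station address
 * 12:34:56:78:AB:CD the code yields MACSTNADDR1 = 0xCDAB7856
 * (bytes 5..2) and MACSTNADDR2 = 0x34120000 (bytes 1..0 in the
 * upper half).
 */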
static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      volatile u32 *macstnaddr1_register,
				      volatile u32 *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15 */
	/* station address byte 5     station address byte 4   */
	/* 16                     23  24                     31 */
	/* station address byte 3     station address byte 2   */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15 */
	/* station address byte 1     station address byte 0   */
	/* 16                     23  24                     31 */
	/*         reserved                   reserved         */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}

static int init_mac_duplex_mode(int full_duplex,
				int limited_to_full_duplex,
				volatile u32 *maccfg2_register)
{
	u32 value = 0;

	/* some interfaces must work in full duplex mode */
	if ((full_duplex == 0) && (limited_to_full_duplex == 1))
		return -EINVAL;

	value = in_be32(maccfg2_register);

	if (full_duplex)
		value |= MACCFG2_FDX;
	else
		value &= ~MACCFG2_FDX;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_check_frame_length_mode(int length_check,
					volatile u32 *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_preamble_length(u8 preamble_length,
				volatile u32 *maccfg2_register)
{
	u32 value = 0;

	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	value = in_be32(maccfg2_register);
	value &= ~MACCFG2_PREL_MASK;
	value |= (preamble_length << MACCFG2_PREL_SHIFT);
	out_be32(maccfg2_register, value);
	return 0;
}

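/*
 * Optionally reset the MII management block, program the management
 * clock divider (plus preamble suppression if requested), then poll
 * MIIMIND until the bus reports non-busy or the timeout expires.
 */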
static int init_mii_management_configuration(int reset_mgmt,
					     int preamble_supress,
					     volatile u32 *miimcfg_register,
					     volatile u32 *miimind_register)
{
	int timeout = PHY_INIT_TIMEOUT;	/* signed, so the timeout test below works */
	u32 value = 0;

	value = in_be32(miimcfg_register);
	if (reset_mgmt) {
		value |= MIIMCFG_RESET_MANAGEMENT;
		out_be32(miimcfg_register, value);
	}

	value = 0;

	if (preamble_supress)
		value |= MIIMCFG_NO_PREAMBLE;

	value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
	out_be32(miimcfg_register, value);

	/* Wait until the bus is free */
	while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
		cpu_relax();

	if (timeout <= 0) {
		ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
		return -ETIMEDOUT;
	}

	return 0;
}

static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, volatile u32 *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UPSMR_BRO;
	else
		value &= ~UPSMR_BRO;

	if (receive_short_frames)
		value |= UPSMR_RSH;
	else
		value &= ~UPSMR_RSH;

	if (promiscuous)
		value |= UPSMR_PRO;
	else
		value &= ~UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}

static int init_max_rx_buff_len(u16 max_rx_buf_len,
				volatile u16 *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0)
	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}

static int init_min_frame_len(u16 min_frame_length,
			      volatile u16 *minflr_register,
			      volatile u16 *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}

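/*
 * Reprogram MACCFG2, UPSMR and UTBIPAR to match the configured
 * enet_interface mode and speed.  For TBI modes the on-chip TBI PHY's
 * autonegotiation is also switched off through the MII management
 * interface, since it defaults to on.
 */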
static int adjust_enet_interface(ucc_geth_private_t *ugeth)
{
	ucc_geth_info_t *ug_info;
	ucc_geth_t *ug_regs;
	ucc_fast_t *uf_regs;
	enet_speed_e speed;
	int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
	    0, limited_to_full_duplex = 0;
	u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
	u16 value;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_info = ugeth->ug_info;
	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	/* Analyze enet_interface according to Interface Mode Configuration
	   table */
	ret_val =
	    get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
				  &rpm, &tbi, &limited_to_full_duplex);
	if (ret_val != 0) {
		ugeth_err("%s: invalid enet_interface mode.", __FUNCTION__);
		return ret_val;
	}

	/* Set MACCFG2 */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
	if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
	else if (speed == ENET_SPEED_1000BT)
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
	maccfg2 |= ug_info->padAndCrc;
	out_be32(&ug_regs->maccfg2, maccfg2);

	/* Set UPSMR */
	upsmr = in_be32(&uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
	if (rpm)
		upsmr |= UPSMR_RPM;
	if (r10m)
		upsmr |= UPSMR_R10M;
	if (tbi)
		upsmr |= UPSMR_TBIM;
	if (rmm)
		upsmr |= UPSMR_RMM;
	out_be32(&uf_regs->upsmr, upsmr);

	/* Set UTBIPAR */
	utbipar = in_be32(&ug_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
	if (tbi)
		utbipar |=
		    (ug_info->phy_address +
		     ugeth->ug_info->uf_info.
		     ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
	else
		utbipar |=
		    (0x10 +
		     ugeth->ug_info->uf_info.
		     ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&ug_regs->utbipar, utbipar);

	/* Disable autonegotiation in TBI mode, because by default it
	   comes up in autonegotiation mode. */
	/* Note that this depends on proper setting in utbipar register. */
	if (tbi) {
		tbiBaseAddress = in_be32(&ug_regs->utbipar);
		tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
		tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
		value =
		    ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
					       ENET_TBI_MII_CR);
		value &= ~0x1000;	/* Turn off autonegotiation */
		ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
					    ENET_TBI_MII_CR, value);
	}

	ret_val = init_mac_duplex_mode(1,
				       limited_to_full_duplex,
				       &ug_regs->maccfg2);
	if (ret_val != 0) {
		ugeth_err
		    ("%s: half duplex not supported in requested configuration.",
		     __FUNCTION__);
		return ret_val;
	}

	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);

	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
	if (ret_val != 0) {
		ugeth_err
		    ("%s: Preamble length must be between 3 and 7 inclusive.",
		     __FUNCTION__);
		return ret_val;
	}

	return 0;
}

1686/* Called every time the controller might need to be made
1687 * aware of new link state. The PHY code conveys this
1688 * information through variables in the ugeth structure, and this
1689 * function converts those variables into the appropriate
1690 * register values, and can bring down the device if needed.
1691 */
1692static void adjust_link(struct net_device *dev)
1693{
1694 ucc_geth_private_t *ugeth = netdev_priv(dev);
1695 ucc_geth_t *ug_regs;
1696 u32 tempval;
1697 struct ugeth_mii_info *mii_info = ugeth->mii_info;
1698
1699 ug_regs = ugeth->ug_regs;
1700
1701 if (mii_info->link) {
1702 /* Now we make sure that we can be in full duplex mode.
1703 * If not, we operate in half-duplex mode. */
1704 if (mii_info->duplex != ugeth->oldduplex) {
1705 if (!(mii_info->duplex)) {
1706 tempval = in_be32(&ug_regs->maccfg2);
1707 tempval &= ~(MACCFG2_FDX);
1708 out_be32(&ug_regs->maccfg2, tempval);
1709
1710 ugeth_info("%s: Half Duplex", dev->name);
1711 } else {
1712 tempval = in_be32(&ug_regs->maccfg2);
1713 tempval |= MACCFG2_FDX;
1714 out_be32(&ug_regs->maccfg2, tempval);
1715
1716 ugeth_info("%s: Full Duplex", dev->name);
1717 }
1718
1719 ugeth->oldduplex = mii_info->duplex;
1720 }
1721
1722 if (mii_info->speed != ugeth->oldspeed) {
1723 switch (mii_info->speed) {
1724 case 1000:
1725#ifdef CONFIG_MPC836x
1726/* FIXME: This code is for 100Mbs BUG fixing,
1727remove this when it is fixed!!! */
1728 if (ugeth->ug_info->enet_interface ==
1729 ENET_1000_GMII)
1730 /* Run the commands which initialize the PHY */
1731 {
1732 tempval =
1733 (u32) mii_info->mdio_read(ugeth->
1734 dev, mii_info->mii_id, 0x1b);
1735 tempval |= 0x000f;
1736 mii_info->mdio_write(ugeth->dev,
1737 mii_info->mii_id, 0x1b,
1738 (u16) tempval);
1739 tempval =
1740 (u32) mii_info->mdio_read(ugeth->
1741 dev, mii_info->mii_id,
1742 MII_BMCR);
1743 mii_info->mdio_write(ugeth->dev,
1744 mii_info->mii_id, MII_BMCR,
1745 (u16) (tempval | BMCR_RESET));
1746 } else if (ugeth->ug_info->enet_interface ==
1747 ENET_1000_RGMII)
1748 /* Run the commands which initialize the PHY */
1749 {
1750 tempval =
1751 (u32) mii_info->mdio_read(ugeth->
1752 dev, mii_info->mii_id, 0x1b);
1753 tempval = (tempval & ~0x000f) | 0x000b;
1754 mii_info->mdio_write(ugeth->dev,
1755 mii_info->mii_id, 0x1b,
1756 (u16) tempval);
1757 tempval =
1758 (u32) mii_info->mdio_read(ugeth->
1759 dev, mii_info->mii_id,
1760 MII_BMCR);
1761 mii_info->mdio_write(ugeth->dev,
1762 mii_info->mii_id, MII_BMCR,
1763 (u16) (tempval | BMCR_RESET));
1764 }
1765 msleep(4000);
1766#endif /* CONFIG_MPC836x */
1767 adjust_enet_interface(ugeth);
1768 break;
1769 case 100:
1770 case 10:
1771#ifdef CONFIG_MPC836x
1772/* FIXME: This is a workaround for a 100 Mb/s bug;
1773remove these lines when the bug is fixed! */
1774 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1775 tempval =
1776 (u32) mii_info->mdio_read(ugeth->dev,
1777 mii_info->mii_id,
1778 0x1b);
1779 tempval = (tempval & ~0x000f) | 0x000b;
1780 mii_info->mdio_write(ugeth->dev,
1781 mii_info->mii_id, 0x1b,
1782 (u16) tempval);
1783 tempval =
1784 (u32) mii_info->mdio_read(ugeth->dev,
1785 mii_info->mii_id,
1786 MII_BMCR);
1787 mii_info->mdio_write(ugeth->dev,
1788 mii_info->mii_id, MII_BMCR,
1789 (u16) (tempval |
1790 BMCR_RESET));
1791 msleep(4000);
1792#endif /* CONFIG_MPC836x */
1793 adjust_enet_interface(ugeth);
1794 break;
1795 default:
1796 ugeth_warn
1797 ("%s: Ack! Speed (%d) is not 10/100/1000!",
1798 dev->name, mii_info->speed);
1799 break;
1800 }
1801
1802 ugeth_info("%s: Speed %dBT", dev->name,
1803 mii_info->speed);
1804
1805 ugeth->oldspeed = mii_info->speed;
1806 }
1807
1808 if (!ugeth->oldlink) {
1809 ugeth_info("%s: Link is up", dev->name);
1810 ugeth->oldlink = 1;
1811 netif_carrier_on(dev);
1812 netif_schedule(dev);
1813 }
1814 } else {
1815 if (ugeth->oldlink) {
1816 ugeth_info("%s: Link is down", dev->name);
1817 ugeth->oldlink = 0;
1818 ugeth->oldspeed = 0;
1819 ugeth->oldduplex = -1;
1820 netif_carrier_off(dev);
1821 }
1822 }
1823}
1824
1825/* Configure the PHY for dev.
1826 * Returns 0 on success, -1 on failure.
1827 */
1828static int init_phy(struct net_device *dev)
1829{
1830 ucc_geth_private_t *ugeth = netdev_priv(dev);
1831 struct phy_info *curphy;
1832 ucc_mii_mng_t *mii_regs;
1833 struct ugeth_mii_info *mii_info;
1834 int err;
1835
1836 mii_regs = &ugeth->ug_regs->miimng;
1837
1838 ugeth->oldlink = 0;
1839 ugeth->oldspeed = 0;
1840 ugeth->oldduplex = -1;
1841
1842 mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
1843
1844 if (NULL == mii_info) {
1845 ugeth_err("%s: Could not allocate mii_info", dev->name);
1846 return -ENOMEM;
1847 }
1848
1849 mii_info->mii_regs = mii_regs;
1850 mii_info->speed = SPEED_1000;
1851 mii_info->duplex = DUPLEX_FULL;
1852 mii_info->pause = 0;
1853 mii_info->link = 0;
1854
1855 mii_info->advertising = (ADVERTISED_10baseT_Half |
1856 ADVERTISED_10baseT_Full |
1857 ADVERTISED_100baseT_Half |
1858 ADVERTISED_100baseT_Full |
1859 ADVERTISED_1000baseT_Full);
1860 mii_info->autoneg = 1;
1861
1862 mii_info->mii_id = ugeth->ug_info->phy_address;
1863
1864 mii_info->dev = dev;
1865
1866 mii_info->mdio_read = &read_phy_reg;
1867 mii_info->mdio_write = &write_phy_reg;
1868
1869 ugeth->mii_info = mii_info;
1870
1871 spin_lock_irq(&ugeth->lock);
1872
1873	/* Set this UCC to be the master of the MII management */
1874 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
1875
1876 if (init_mii_management_configuration(1,
1877 ugeth->ug_info->
1878 miiPreambleSupress,
1879 &mii_regs->miimcfg,
1880 &mii_regs->miimind)) {
1881 ugeth_err("%s: The MII Bus is stuck!", dev->name);
1882 err = -1;
1883 goto bus_fail;
1884 }
1885
1886 spin_unlock_irq(&ugeth->lock);
1887
1888 /* get info for this PHY */
1889 curphy = get_phy_info(ugeth->mii_info);
1890
1891 if (curphy == NULL) {
1892 ugeth_err("%s: No PHY found", dev->name);
1893 err = -1;
1894 goto no_phy;
1895 }
1896
1897 mii_info->phyinfo = curphy;
1898
1899 /* Run the commands which initialize the PHY */
1900 if (curphy->init) {
1901 err = curphy->init(ugeth->mii_info);
1902 if (err)
1903 goto phy_init_fail;
1904 }
1905
1906 return 0;
1907
1908 phy_init_fail:
1909 no_phy:
1910 bus_fail:
1911 kfree(mii_info);
1912
1913 return err;
1914}
1915
1916#ifdef CONFIG_UGETH_TX_ON_DEMOND
1917static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
1918{
1919 ucc_fast_transmit_on_demand(ugeth->uccf);
1920
1921 return 0;
1922}
1923#endif
1924
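/* Gracefully stop the transmitter.  A sketch of the handshake as
 * implemented below: mask the GRA event and clear any stale one (the
 * UCCE bits are cleared by writing 1), issue the QE_GRACEFUL_STOP_TX
 * host command for this UCC's CECR subblock, then busy-wait until the
 * QE raises UCCE_GRA.  Note there is no timeout on the poll.
 */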
1925static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
1926{
1927 ucc_fast_private_t *uccf;
1928 u32 cecr_subblock;
1929 u32 temp;
1930
1931 uccf = ugeth->uccf;
1932
1933 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1934 temp = in_be32(uccf->p_uccm);
1935 temp &= ~UCCE_GRA;
1936 out_be32(uccf->p_uccm, temp);
1937 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1938
1939 /* Issue host command */
1940 cecr_subblock =
1941 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1942 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1943 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1944
1945 /* Wait for command to complete */
1946 do {
1947 temp = in_be32(uccf->p_ucce);
1948 } while (!(temp & UCCE_GRA));
1949
1950 uccf->stopped_tx = 1;
1951
1952 return 0;
1953}
1954
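/* Gracefully stop the receiver.  Unlike the Tx side, completion is
 * signalled through the GRACEFUL_STOP_ACKNOWLEDGE_RX bit in the Rx
 * global parameter RAM, so the command is reissued until the QE
 * asserts that bit, as the spec requires.
 */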
1955static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
1956{
1957 ucc_fast_private_t *uccf;
1958 u32 cecr_subblock;
1959 u8 temp;
1960
1961 uccf = ugeth->uccf;
1962
1963 /* Clear acknowledge bit */
1964 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1965 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1966 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1967
1968 /* Keep issuing command and checking acknowledge bit until
1969 it is asserted, according to spec */
1970 do {
1971 /* Issue host command */
1972 cecr_subblock =
1973 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1974 ucc_num);
1975 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1976 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1977
1978 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1979 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1980
1981 uccf->stopped_rx = 1;
1982
1983 return 0;
1984}
1985
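/* Resume transmission after a graceful stop by issuing the
 * QE_RESTART_TX host command; ugeth_restart_rx() below is the
 * receive-side counterpart.
 */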
1986static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
1987{
1988 ucc_fast_private_t *uccf;
1989 u32 cecr_subblock;
1990
1991 uccf = ugeth->uccf;
1992
1993 cecr_subblock =
1994 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1995 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
1996 0);
1997 uccf->stopped_tx = 0;
1998
1999 return 0;
2000}
2001
2002static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
2003{
2004 ucc_fast_private_t *uccf;
2005 u32 cecr_subblock;
2006
2007 uccf = ugeth->uccf;
2008
2009 cecr_subblock =
2010 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
2011 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
2012 0);
2013 uccf->stopped_rx = 0;
2014
2015 return 0;
2016}
2017
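/* Enable the channel in one or both directions, restarting any
 * direction that was previously stopped gracefully.  Typical usage,
 * as in the filtering code further down (a sketch, not a fixed API):
 *
 *	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 *	...reprogram hardware...
 *	ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
 */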
2018static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
2019{
2020 ucc_fast_private_t *uccf;
2021 int enabled_tx, enabled_rx;
2022
2023 uccf = ugeth->uccf;
2024
2025 /* check if the UCC number is in range. */
2026 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2027 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2028 return -EINVAL;
2029 }
2030
2031 enabled_tx = uccf->enabled_tx;
2032 enabled_rx = uccf->enabled_rx;
2033
2034 /* Get Tx and Rx going again, in case this channel was actively
2035 disabled. */
2036 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
2037 ugeth_restart_tx(ugeth);
2038 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
2039 ugeth_restart_rx(ugeth);
2040
2041 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
2042
2043 return 0;
2044
2045}
2046
2047static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
2048{
2049 ucc_fast_private_t *uccf;
2050
2051 uccf = ugeth->uccf;
2052
2053 /* check if the UCC number is in range. */
2054 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2055 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2056 return -EINVAL;
2057 }
2058
2059 /* Stop any transmissions */
2060 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
2061 ugeth_graceful_stop_tx(ugeth);
2062
2063 /* Stop any receptions */
2064 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
2065 ugeth_graceful_stop_rx(ugeth);
2066
2067 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
2068
2069 return 0;
2070}
2071
2072static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
2073{
2074#ifdef DEBUG
2075 ucc_fast_dump_regs(ugeth->uccf);
2076 dump_regs(ugeth);
2077 dump_bds(ugeth);
2078#endif
2079}
2080
2081#ifdef CONFIG_UGETH_FILTERING
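/* Pack logical TAD parameters into the byte-serialized form the QE
 * filter expects.  As laid out below: byte 0 carries the valid,
 * extended-features and reject flags plus the upper VTAG-op bits;
 * byte 1 the lower VTAG-op bits, the non-tagged-op flag and RQOS;
 * bytes 2-3 the VLAN priority and the 12-bit VID.
 */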
2082static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
2083 p_UccGethTadParams,
2084 qe_fltr_tad_t *qe_fltr_tad)
2085{
2086 u16 temp;
2087
2088 /* Zero serialized TAD */
2089 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
2090
2091 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
2092 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
2093 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2094 || (p_UccGethTadParams->vnontag_op !=
2095 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
2096 )
2097 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
2098 if (p_UccGethTadParams->reject_frame)
2099 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
2100 temp =
2101 (u16) (((u16) p_UccGethTadParams->
2102 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
2103 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
2104
2105 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
2106 if (p_UccGethTadParams->vnontag_op ==
2107 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
2108 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
2109 qe_fltr_tad->serialized[1] |=
2110 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
2111
2112 qe_fltr_tad->serialized[2] |=
2113 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
2114 /* upper bits */
2115 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
2116 /* lower bits */
2117 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
2118
2119 return 0;
2120}
2121
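/* Search the group or individual hash queue (chosen by the multicast
 * bit of the address) for a matching container.  Beware the contract:
 * a match is returned still dequeued, so the caller must either
 * re-queue it or destroy it; non-matches are put back as scanned.
 */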
2122static enet_addr_container_t
2123 *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
2124 enet_addr_t *p_enet_addr)
2125{
2126 enet_addr_container_t *enet_addr_cont;
2127 struct list_head *p_lh;
2128 u16 i, num;
2129 int32_t j;
2130 u8 *p_counter;
2131
2132 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2133 p_lh = &ugeth->group_hash_q;
2134 p_counter = &(ugeth->numGroupAddrInHash);
2135 } else {
2136 p_lh = &ugeth->ind_hash_q;
2137 p_counter = &(ugeth->numIndAddrInHash);
2138 }
2139
2140 if (!p_lh)
2141 return NULL;
2142
2143 num = *p_counter;
2144
2145 for (i = 0; i < num; i++) {
2146 enet_addr_cont =
2147 (enet_addr_container_t *)
2148 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2149 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
2150 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
2151 break;
2152 if (j == 0)
2153 return enet_addr_cont; /* Found */
2154 }
2155 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2156 }
2157 return NULL;
2158}
2159
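/* Add an address to the appropriate hash filter.  Duplicates are
 * simply re-queued, the per-type limit (maxGroupAddrInHash or
 * maxIndAddrInHash) is enforced, and only genuinely new entries are
 * programmed into the hardware via hw_add_addr_in_hash().
 */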
2160static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
2161 enet_addr_t *p_enet_addr)
2162{
2163 ucc_geth_enet_address_recognition_location_e location;
2164 enet_addr_container_t *enet_addr_cont;
2165 struct list_head *p_lh;
2166 u8 i;
2167 u32 limit;
2168 u8 *p_counter;
2169
2170 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2171 p_lh = &ugeth->group_hash_q;
2172 limit = ugeth->ug_info->maxGroupAddrInHash;
2173 location =
2174 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
2175 p_counter = &(ugeth->numGroupAddrInHash);
2176 } else {
2177 p_lh = &ugeth->ind_hash_q;
2178 limit = ugeth->ug_info->maxIndAddrInHash;
2179 location =
2180 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
2181 p_counter = &(ugeth->numIndAddrInHash);
2182 }
2183
2184 if ((enet_addr_cont =
2185 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
2186 list_add(p_lh, &enet_addr_cont->node); /* Put it back */
2187 return 0;
2188 }
2189 if ((!p_lh) || (!(*p_counter < limit)))
2190 return -EBUSY;
2191 if (!(enet_addr_cont = get_enet_addr_container()))
2192 return -ENOMEM;
2193 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2194 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
2195 enet_addr_cont->location = location;
2196 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2197 ++(*p_counter);
2198
2199 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2200
2201 return 0;
2202}
2203
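/* Remove one address from the hash filter.  The hash registers are a
 * many-to-one filter, so a single address cannot simply be subtracted;
 * instead the controller is quiesced, the registers are zeroed, and
 * every remaining queued address is hashed back in.
 */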
2204static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
2205 enet_addr_t *p_enet_addr)
2206{
2207 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2208 enet_addr_container_t *enet_addr_cont;
2209 ucc_fast_private_t *uccf;
2210 comm_dir_e comm_dir;
2211 u16 i, num;
2212 struct list_head *p_lh;
2213 u32 *addr_h, *addr_l;
2214 u8 *p_counter;
2215
2216 uccf = ugeth->uccf;
2217
2218 p_82xx_addr_filt =
2219 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2220 addressfiltering;
2221
2222	enet_addr_cont =
2223	    ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr);
2224	if (!enet_addr_cont)
2225		return -ENOENT;
2226
2227 /* It's been found and removed from the CQ. */
2228 /* Now destroy its container */
2229 put_enet_addr_container(enet_addr_cont);
2230
2231 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2232 addr_h = &(p_82xx_addr_filt->gaddr_h);
2233 addr_l = &(p_82xx_addr_filt->gaddr_l);
2234 p_lh = &ugeth->group_hash_q;
2235 p_counter = &(ugeth->numGroupAddrInHash);
2236 } else {
2237 addr_h = &(p_82xx_addr_filt->iaddr_h);
2238 addr_l = &(p_82xx_addr_filt->iaddr_l);
2239 p_lh = &ugeth->ind_hash_q;
2240 p_counter = &(ugeth->numIndAddrInHash);
2241 }
2242
2243 comm_dir = 0;
2244 if (uccf->enabled_tx)
2245 comm_dir |= COMM_DIR_TX;
2246 if (uccf->enabled_rx)
2247 comm_dir |= COMM_DIR_RX;
2248 if (comm_dir)
2249 ugeth_disable(ugeth, comm_dir);
2250
2251 /* Clear the hash table. */
2252 out_be32(addr_h, 0x00000000);
2253 out_be32(addr_l, 0x00000000);
2254
2255 /* Add all remaining CQ elements back into hash */
2256 num = --(*p_counter);
2257 for (i = 0; i < num; i++) {
2258 enet_addr_cont =
2259 (enet_addr_container_t *)
2260 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2261 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2262 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2263 }
2264
2265 if (comm_dir)
2266 ugeth_enable(ugeth, comm_dir);
2267
2268 return 0;
2269}
2270#endif /* CONFIG_UGETH_FILTERING */
2271
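/* Clear an entire hash filter (group or individual): zero the
 * hardware gaddr/iaddr registers and release every container queued
 * in the matching software shadow list, quiescing the controller
 * around the register writes.
 */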
2272static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
2273 ugeth,
2274 enet_addr_type_e
2275 enet_addr_type)
2276{
2277 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2278 ucc_fast_private_t *uccf;
2279 comm_dir_e comm_dir;
2280 struct list_head *p_lh;
2281 u16 i, num;
2282 u32 *addr_h, *addr_l;
2283 u8 *p_counter;
2284
2285 uccf = ugeth->uccf;
2286
2287 p_82xx_addr_filt =
2288 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2289 addressfiltering;
2290
2291 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2292 addr_h = &(p_82xx_addr_filt->gaddr_h);
2293 addr_l = &(p_82xx_addr_filt->gaddr_l);
2294 p_lh = &ugeth->group_hash_q;
2295 p_counter = &(ugeth->numGroupAddrInHash);
2296 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2297 addr_h = &(p_82xx_addr_filt->iaddr_h);
2298 addr_l = &(p_82xx_addr_filt->iaddr_l);
2299 p_lh = &ugeth->ind_hash_q;
2300 p_counter = &(ugeth->numIndAddrInHash);
2301 } else
2302 return -EINVAL;
2303
2304 comm_dir = 0;
2305 if (uccf->enabled_tx)
2306 comm_dir |= COMM_DIR_TX;
2307 if (uccf->enabled_rx)
2308 comm_dir |= COMM_DIR_RX;
2309 if (comm_dir)
2310 ugeth_disable(ugeth, comm_dir);
2311
2312 /* Clear the hash table. */
2313 out_be32(addr_h, 0x00000000);
2314 out_be32(addr_l, 0x00000000);
2315
2316 if (!p_lh)
2317 return 0;
2318
2319 num = *p_counter;
2320
2321 /* Delete all remaining CQ elements */
2322 for (i = 0; i < num; i++)
2323 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2324
2325 *p_counter = 0;
2326
2327 if (comm_dir)
2328 ugeth_enable(ugeth, comm_dir);
2329
2330 return 0;
2331}
2332
2333#ifdef CONFIG_UGETH_FILTERING
2334static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
2335 enet_addr_t *p_enet_addr,
2336 u8 paddr_num)
2337{
2338 int i;
2339
2340 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2341 ugeth_warn
2342 ("%s: multicast address added to paddr will have no "
2343 "effect - is this what you wanted?",
2344 __FUNCTION__);
2345
2346 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2347 /* store address in our database */
2348 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2349 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2350 /* put in hardware */
2351 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2352}
2353#endif /* CONFIG_UGETH_FILTERING */
2354
2355static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
2356 u8 paddr_num)
2357{
2358 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2359 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2360}
2361
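/* Release everything ucc_geth_startup() allocated: the fast UCC
 * context, the MURAM parameter areas, the InitEnet shadow (returning
 * its thread entries), the BD rings together with any skbs still
 * mapped to them, and the address-filter queues.  Every pointer is
 * checked first, so this is safe on a partially initialized ugeth.
 */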
2362static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
2363{
2364 u16 i, j;
2365 u8 *bd;
2366
2367 if (!ugeth)
2368 return;
2369
2370 if (ugeth->uccf)
2371 ucc_fast_free(ugeth->uccf);
2372
2373 if (ugeth->p_thread_data_tx) {
2374 qe_muram_free(ugeth->thread_dat_tx_offset);
2375 ugeth->p_thread_data_tx = NULL;
2376 }
2377 if (ugeth->p_thread_data_rx) {
2378 qe_muram_free(ugeth->thread_dat_rx_offset);
2379 ugeth->p_thread_data_rx = NULL;
2380 }
2381 if (ugeth->p_exf_glbl_param) {
2382 qe_muram_free(ugeth->exf_glbl_param_offset);
2383 ugeth->p_exf_glbl_param = NULL;
2384 }
2385 if (ugeth->p_rx_glbl_pram) {
2386 qe_muram_free(ugeth->rx_glbl_pram_offset);
2387 ugeth->p_rx_glbl_pram = NULL;
2388 }
2389 if (ugeth->p_tx_glbl_pram) {
2390 qe_muram_free(ugeth->tx_glbl_pram_offset);
2391 ugeth->p_tx_glbl_pram = NULL;
2392 }
2393 if (ugeth->p_send_q_mem_reg) {
2394 qe_muram_free(ugeth->send_q_mem_reg_offset);
2395 ugeth->p_send_q_mem_reg = NULL;
2396 }
2397 if (ugeth->p_scheduler) {
2398 qe_muram_free(ugeth->scheduler_offset);
2399 ugeth->p_scheduler = NULL;
2400 }
2401 if (ugeth->p_tx_fw_statistics_pram) {
2402 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2403 ugeth->p_tx_fw_statistics_pram = NULL;
2404 }
2405 if (ugeth->p_rx_fw_statistics_pram) {
2406 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2407 ugeth->p_rx_fw_statistics_pram = NULL;
2408 }
2409 if (ugeth->p_rx_irq_coalescing_tbl) {
2410 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2411 ugeth->p_rx_irq_coalescing_tbl = NULL;
2412 }
2413 if (ugeth->p_rx_bd_qs_tbl) {
2414 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2415 ugeth->p_rx_bd_qs_tbl = NULL;
2416 }
2417 if (ugeth->p_init_enet_param_shadow) {
2418 return_init_enet_entries(ugeth,
2419 &(ugeth->p_init_enet_param_shadow->
2420 rxthread[0]),
2421 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2422 ugeth->ug_info->riscRx, 1);
2423 return_init_enet_entries(ugeth,
2424 &(ugeth->p_init_enet_param_shadow->
2425 txthread[0]),
2426 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2427 ugeth->ug_info->riscTx, 0);
2428 kfree(ugeth->p_init_enet_param_shadow);
2429 ugeth->p_init_enet_param_shadow = NULL;
2430 }
2431 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2432 bd = ugeth->p_tx_bd_ring[i];
2433 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2434 if (ugeth->tx_skbuff[i][j]) {
2435 dma_unmap_single(NULL,
2436 BD_BUFFER_ARG(bd),
2437 (BD_STATUS_AND_LENGTH(bd) &
2438 BD_LENGTH_MASK),
2439 DMA_TO_DEVICE);
2440 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2441 ugeth->tx_skbuff[i][j] = NULL;
2442 }
2443 }
2444
2445 kfree(ugeth->tx_skbuff[i]);
2446
2447 if (ugeth->p_tx_bd_ring[i]) {
2448 if (ugeth->ug_info->uf_info.bd_mem_part ==
2449 MEM_PART_SYSTEM)
2450 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2451 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2452 MEM_PART_MURAM)
2453 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2454 ugeth->p_tx_bd_ring[i] = NULL;
2455 }
2456 }
2457 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2458 if (ugeth->p_rx_bd_ring[i]) {
2459 /* Return existing data buffers in ring */
2460 bd = ugeth->p_rx_bd_ring[i];
2461 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2462 if (ugeth->rx_skbuff[i][j]) {
2463 dma_unmap_single(NULL, BD_BUFFER(bd),
2464 ugeth->ug_info->
2465 uf_info.
2466 max_rx_buf_length +
2467 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2468 DMA_FROM_DEVICE);
2469
2470 dev_kfree_skb_any(ugeth->
2471 rx_skbuff[i][j]);
2472 ugeth->rx_skbuff[i][j] = NULL;
2473 }
2474 bd += UCC_GETH_SIZE_OF_BD;
2475 }
2476
2477 kfree(ugeth->rx_skbuff[i]);
2478
2479 if (ugeth->ug_info->uf_info.bd_mem_part ==
2480 MEM_PART_SYSTEM)
2481 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2482 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2483 MEM_PART_MURAM)
2484 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2485 ugeth->p_rx_bd_ring[i] = NULL;
2486 }
2487 }
2488 while (!list_empty(&ugeth->group_hash_q))
2489 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2490 (dequeue(&ugeth->group_hash_q)));
2491 while (!list_empty(&ugeth->ind_hash_q))
2492 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2493 (dequeue(&ugeth->ind_hash_q)));
2494
2495}
2496
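/* Apply the interface's multicast policy: IFF_PROMISC sets UPSMR_PRO,
 * IFF_ALLMULTI opens the group hash completely, and otherwise the
 * hash is rebuilt from dev->mc_list, skipping non-group addresses.
 */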
2497static void ucc_geth_set_multi(struct net_device *dev)
2498{
2499 ucc_geth_private_t *ugeth;
2500 struct dev_mc_list *dmi;
2501 ucc_fast_t *uf_regs;
2502 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2503 enet_addr_t tempaddr;
2504 u8 *mcptr, *tdptr;
2505 int i, j;
2506
2507 ugeth = netdev_priv(dev);
2508
2509 uf_regs = ugeth->uccf->uf_regs;
2510
2511 if (dev->flags & IFF_PROMISC) {
2512
2513		uf_regs->upsmr |= UPSMR_PRO;
2514
2515 } else {
2516
2517 uf_regs->upsmr &= ~UPSMR_PRO;
2518
2519 p_82xx_addr_filt =
2520 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
2521 p_rx_glbl_pram->addressfiltering;
2522
2523 if (dev->flags & IFF_ALLMULTI) {
2524 /* Catch all multicast addresses, so set the
2525 * filter to all 1's.
2526 */
2527 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2528 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2529 } else {
2530 /* Clear filter and add the addresses in the list.
2531 */
2532 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2533 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2534
2535 dmi = dev->mc_list;
2536
2537 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2538
2539 /* Only support group multicast for now.
2540 */
2541 if (!(dmi->dmi_addr[0] & 1))
2542 continue;
2543
2544 /* The address in dmi_addr is LSB first,
2545				 * and tempaddr is MSB first.  We have to
2546 * copy bytes MSB first from dmi_addr.
2547 */
2548 mcptr = (u8 *) dmi->dmi_addr + 5;
2549 tdptr = (u8 *) & tempaddr;
2550 for (j = 0; j < 6; j++)
2551 *tdptr++ = *mcptr--;
2552
2553 /* Ask CPM to run CRC and set bit in
2554 * filter mask.
2555 */
2556 hw_add_addr_in_hash(ugeth, &tempaddr);
2557
2558 }
2559 }
2560 }
2561}
2562
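/* Quiesce and release the controller: gracefully stop both
 * directions, report link-down, mask and clear UCC events, disable
 * the MAC receiver and transmitter, silence the PHY interrupt (or the
 * polling timer), free the IRQs and finally release all resources via
 * ucc_geth_memclean().
 */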
2563static void ucc_geth_stop(ucc_geth_private_t *ugeth)
2564{
2565 ucc_geth_t *ug_regs = ugeth->ug_regs;
2566 u32 tempval;
2567
2568 ugeth_vdbg("%s: IN", __FUNCTION__);
2569
2570 /* Disable the controller */
2571 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2572
2573 /* Tell the kernel the link is down */
2574 ugeth->mii_info->link = 0;
2575 adjust_link(ugeth->dev);
2576
2577 /* Mask all interrupts */
2578 out_be32(ugeth->uccf->p_ucce, 0x00000000);
2579
2580 /* Clear all interrupts */
2581 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2582
2583 /* Disable Rx and Tx */
2584 tempval = in_be32(&ug_regs->maccfg1);
2585 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2586 out_be32(&ug_regs->maccfg1, tempval);
2587
2588 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2589 /* Clear any pending interrupts */
2590 mii_clear_phy_interrupt(ugeth->mii_info);
2591
2592 /* Disable PHY Interrupts */
2593 mii_configure_phy_interrupt(ugeth->mii_info,
2594 MII_INTERRUPT_DISABLED);
2595 }
2596
2597 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2598
2599 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2600 free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
2601 } else {
2602 del_timer_sync(&ugeth->phy_info_timer);
2603 }
2604
2605 ucc_geth_memclean(ugeth);
2606}
2607
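/* Bring the controller up from scratch.  Roughly, in order: validate
 * ug_info, initialize the fast UCC, program the MAC registers (UPSMR,
 * MACCFG1, IPGIFG, HAFDUP, UESCR), allocate and initialize the BD
 * rings and skb arrays, fill the Tx and Rx global parameter RAM pages
 * in MURAM, set up address filtering, then build the InitEnet
 * parameter structure (keeping a shadow copy for later teardown) and
 * issue the QE_INIT_TX_RX host command.
 */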
2608static int ucc_geth_startup(ucc_geth_private_t *ugeth)
2609{
2610 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2611 ucc_geth_init_pram_t *p_init_enet_pram;
2612 ucc_fast_private_t *uccf;
2613 ucc_geth_info_t *ug_info;
2614 ucc_fast_info_t *uf_info;
2615 ucc_fast_t *uf_regs;
2616 ucc_geth_t *ug_regs;
2617 int ret_val = -EINVAL;
2618 u32 remoder = UCC_GETH_REMODER_INIT;
2619 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2620 u32 ifstat, i, j, size, l2qt, l3qt, length;
2621 u16 temoder = UCC_GETH_TEMODER_INIT;
2622 u16 test;
2623 u8 function_code = 0;
2624 u8 *bd, *endOfRing;
2625 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2626
2627 ugeth_vdbg("%s: IN", __FUNCTION__);
2628
2629 ug_info = ugeth->ug_info;
2630 uf_info = &ug_info->uf_info;
2631
2632 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2633 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2634 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2635 return -EINVAL;
2636 }
2637
2638 /* Rx BD lengths */
2639 for (i = 0; i < ug_info->numQueuesRx; i++) {
2640 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2641 (ug_info->bdRingLenRx[i] %
2642 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2643 ugeth_err
2644			    ("%s: Rx BD ring length must be a multiple of 4"
2645			     " and no smaller than 8.", __FUNCTION__);
2646 return -EINVAL;
2647 }
2648 }
2649
2650 /* Tx BD lengths */
2651 for (i = 0; i < ug_info->numQueuesTx; i++) {
2652 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2653 ugeth_err
2654 ("%s: Tx BD ring length must be no smaller than 2.",
2655 __FUNCTION__);
2656 return -EINVAL;
2657 }
2658 }
2659
2660 /* mrblr */
2661 if ((uf_info->max_rx_buf_length == 0) ||
2662 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2663 ugeth_err
2664 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2665 __FUNCTION__);
2666 return -EINVAL;
2667 }
2668
2669 /* num Tx queues */
2670 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2671 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2672 return -EINVAL;
2673 }
2674
2675 /* num Rx queues */
2676 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2677 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2678 return -EINVAL;
2679 }
2680
2681 /* l2qt */
2682 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2683 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2684 ugeth_err
2685 ("%s: VLAN priority table entry must not be"
2686 " larger than number of Rx queues.",
2687 __FUNCTION__);
2688 return -EINVAL;
2689 }
2690 }
2691
2692 /* l3qt */
2693 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2694 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2695 ugeth_err
2696 ("%s: IP priority table entry must not be"
2697 " larger than number of Rx queues.",
2698 __FUNCTION__);
2699 return -EINVAL;
2700 }
2701 }
2702
2703 if (ug_info->cam && !ug_info->ecamptr) {
2704 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2705 __FUNCTION__);
2706 return -EINVAL;
2707 }
2708
2709 if ((ug_info->numStationAddresses !=
2710 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2711 && ug_info->rxExtendedFiltering) {
2712 ugeth_err("%s: Number of station addresses greater than 1 "
2713 "not allowed in extended parsing mode.",
2714 __FUNCTION__);
2715 return -EINVAL;
2716 }
2717
2718 /* Generate uccm_mask for receive */
2719 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2720 for (i = 0; i < ug_info->numQueuesRx; i++)
2721 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2722
2723 for (i = 0; i < ug_info->numQueuesTx; i++)
2724 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2725 /* Initialize the general fast UCC block. */
2726 if (ucc_fast_init(uf_info, &uccf)) {
2727 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2728 ucc_geth_memclean(ugeth);
2729 return -ENOMEM;
2730 }
2731 ugeth->uccf = uccf;
2732
2733 switch (ug_info->numThreadsRx) {
2734 case UCC_GETH_NUM_OF_THREADS_1:
2735 numThreadsRxNumerical = 1;
2736 break;
2737 case UCC_GETH_NUM_OF_THREADS_2:
2738 numThreadsRxNumerical = 2;
2739 break;
2740 case UCC_GETH_NUM_OF_THREADS_4:
2741 numThreadsRxNumerical = 4;
2742 break;
2743 case UCC_GETH_NUM_OF_THREADS_6:
2744 numThreadsRxNumerical = 6;
2745 break;
2746 case UCC_GETH_NUM_OF_THREADS_8:
2747 numThreadsRxNumerical = 8;
2748 break;
2749 default:
2750 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2751 ucc_geth_memclean(ugeth);
2752 return -EINVAL;
2753 break;
2754 }
2755
2756 switch (ug_info->numThreadsTx) {
2757 case UCC_GETH_NUM_OF_THREADS_1:
2758 numThreadsTxNumerical = 1;
2759 break;
2760 case UCC_GETH_NUM_OF_THREADS_2:
2761 numThreadsTxNumerical = 2;
2762 break;
2763 case UCC_GETH_NUM_OF_THREADS_4:
2764 numThreadsTxNumerical = 4;
2765 break;
2766 case UCC_GETH_NUM_OF_THREADS_6:
2767 numThreadsTxNumerical = 6;
2768 break;
2769 case UCC_GETH_NUM_OF_THREADS_8:
2770 numThreadsTxNumerical = 8;
2771 break;
2772 default:
2773 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2774 ucc_geth_memclean(ugeth);
2775 return -EINVAL;
2776 break;
2777 }
2778
2779 /* Calculate rx_extended_features */
2780 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2781 ug_info->ipAddressAlignment ||
2782 (ug_info->numStationAddresses !=
2783 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2784
2785 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2786 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2787 || (ug_info->vlanOperationNonTagged !=
2788 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2789
2790 uf_regs = uccf->uf_regs;
2791 ug_regs = (ucc_geth_t *) (uccf->uf_regs);
2792 ugeth->ug_regs = ug_regs;
2793
2794 init_default_reg_vals(&uf_regs->upsmr,
2795 &ug_regs->maccfg1, &ug_regs->maccfg2);
2796
2797 /* Set UPSMR */
2798 /* For more details see the hardware spec. */
2799 init_rx_parameters(ug_info->bro,
2800 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2801
2802 /* We're going to ignore other registers for now, */
2803 /* except as needed to get up and running */
2804
2805 /* Set MACCFG1 */
2806 /* For more details see the hardware spec. */
2807 init_flow_control_params(ug_info->aufc,
2808 ug_info->receiveFlowControl,
2809 1,
2810 ug_info->pausePeriod,
2811 ug_info->extensionField,
2812 &uf_regs->upsmr,
2813 &ug_regs->uempr, &ug_regs->maccfg1);
2814
2815 maccfg1 = in_be32(&ug_regs->maccfg1);
2816 maccfg1 |= MACCFG1_ENABLE_RX;
2817 maccfg1 |= MACCFG1_ENABLE_TX;
2818 out_be32(&ug_regs->maccfg1, maccfg1);
2819
2820 /* Set IPGIFG */
2821 /* For more details see the hardware spec. */
2822 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2823 ug_info->nonBackToBackIfgPart2,
2824 ug_info->
2825 miminumInterFrameGapEnforcement,
2826 ug_info->backToBackInterFrameGap,
2827 &ug_regs->ipgifg);
2828 if (ret_val != 0) {
2829 ugeth_err("%s: IPGIFG initialization parameter too large.",
2830 __FUNCTION__);
2831 ucc_geth_memclean(ugeth);
2832 return ret_val;
2833 }
2834
2835 /* Set HAFDUP */
2836 /* For more details see the hardware spec. */
2837 ret_val = init_half_duplex_params(ug_info->altBeb,
2838 ug_info->backPressureNoBackoff,
2839 ug_info->noBackoff,
2840 ug_info->excessDefer,
2841 ug_info->altBebTruncation,
2842 ug_info->maxRetransmission,
2843 ug_info->collisionWindow,
2844 &ug_regs->hafdup);
2845 if (ret_val != 0) {
2846 ugeth_err("%s: Half Duplex initialization parameter too large.",
2847 __FUNCTION__);
2848 ucc_geth_memclean(ugeth);
2849 return ret_val;
2850 }
2851
2852 /* Set IFSTAT */
2853 /* For more details see the hardware spec. */
2854 /* Read only - resets upon read */
2855 ifstat = in_be32(&ug_regs->ifstat);
2856
2857 /* Clear UEMPR */
2858 /* For more details see the hardware spec. */
2859 out_be32(&ug_regs->uempr, 0);
2860
2861 /* Set UESCR */
2862 /* For more details see the hardware spec. */
2863 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2864 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2865 0, &uf_regs->upsmr, &ug_regs->uescr);
2866
2867 /* Allocate Tx bds */
2868 for (j = 0; j < ug_info->numQueuesTx; j++) {
2869		/* Allocate in multiples of
2870 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2871 according to spec */
2872 length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
2873 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2874 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2875 if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
2876 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2877 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2878 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2879 u32 align = 4;
2880 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2881 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2882 ugeth->tx_bd_ring_offset[j] =
2883 (u32) (kmalloc((u32) (length + align),
2884 GFP_KERNEL));
2885 if (ugeth->tx_bd_ring_offset[j] != 0)
2886 ugeth->p_tx_bd_ring[j] =
2887 (void*)((ugeth->tx_bd_ring_offset[j] +
2888 align) & ~(align - 1));
2889 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2890 ugeth->tx_bd_ring_offset[j] =
2891 qe_muram_alloc(length,
2892 UCC_GETH_TX_BD_RING_ALIGNMENT);
2893 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2894 ugeth->p_tx_bd_ring[j] =
2895 (u8 *) qe_muram_addr(ugeth->
2896 tx_bd_ring_offset[j]);
2897 }
2898 if (!ugeth->p_tx_bd_ring[j]) {
2899 ugeth_err
2900 ("%s: Can not allocate memory for Tx bd rings.",
2901 __FUNCTION__);
2902 ucc_geth_memclean(ugeth);
2903 return -ENOMEM;
2904 }
2905 /* Zero unused end of bd ring, according to spec */
2906 memset(ugeth->p_tx_bd_ring[j] +
2907 ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
2908 length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
2909 }
2910
2911 /* Allocate Rx bds */
2912 for (j = 0; j < ug_info->numQueuesRx; j++) {
2913 length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
2914 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2915 u32 align = 4;
2916 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2917 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2918 ugeth->rx_bd_ring_offset[j] =
2919 (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
2920 if (ugeth->rx_bd_ring_offset[j] != 0)
2921 ugeth->p_rx_bd_ring[j] =
2922 (void*)((ugeth->rx_bd_ring_offset[j] +
2923 align) & ~(align - 1));
2924 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2925 ugeth->rx_bd_ring_offset[j] =
2926 qe_muram_alloc(length,
2927 UCC_GETH_RX_BD_RING_ALIGNMENT);
2928 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2929 ugeth->p_rx_bd_ring[j] =
2930 (u8 *) qe_muram_addr(ugeth->
2931 rx_bd_ring_offset[j]);
2932 }
2933 if (!ugeth->p_rx_bd_ring[j]) {
2934 ugeth_err
2935 ("%s: Can not allocate memory for Rx bd rings.",
2936 __FUNCTION__);
2937 ucc_geth_memclean(ugeth);
2938 return -ENOMEM;
2939 }
2940 }
2941
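	/* BD ring layout note: each ring is a flat array of
	   UCC_GETH_SIZE_OF_BD-byte descriptors.  The loops below clear
	   every descriptor, and the last one in each ring has its status
	   rewritten with the Wrap bit (T_W or R_W) so the QE cycles back
	   to the ring base; Rx descriptors are seeded with R_I to raise
	   an interrupt per received frame. */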
2942 /* Init Tx bds */
2943 for (j = 0; j < ug_info->numQueuesTx; j++) {
2944 /* Setup the skbuff rings */
2945 ugeth->tx_skbuff[j] =
2946 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2947 ugeth->ug_info->bdRingLenTx[j],
2948 GFP_KERNEL);
2949
2950 if (ugeth->tx_skbuff[j] == NULL) {
2951 ugeth_err("%s: Could not allocate tx_skbuff",
2952 __FUNCTION__);
2953 ucc_geth_memclean(ugeth);
2954 return -ENOMEM;
2955 }
2956
2957 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2958 ugeth->tx_skbuff[j][i] = NULL;
2959
2960 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2961 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2962 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2963 BD_BUFFER_CLEAR(bd);
2964 BD_STATUS_AND_LENGTH_SET(bd, 0);
2965 bd += UCC_GETH_SIZE_OF_BD;
2966 }
2967 bd -= UCC_GETH_SIZE_OF_BD;
2968 BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
2969 }
2970
2971 /* Init Rx bds */
2972 for (j = 0; j < ug_info->numQueuesRx; j++) {
2973 /* Setup the skbuff rings */
2974 ugeth->rx_skbuff[j] =
2975 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2976 ugeth->ug_info->bdRingLenRx[j],
2977 GFP_KERNEL);
2978
2979 if (ugeth->rx_skbuff[j] == NULL) {
2980 ugeth_err("%s: Could not allocate rx_skbuff",
2981 __FUNCTION__);
2982 ucc_geth_memclean(ugeth);
2983 return -ENOMEM;
2984 }
2985
2986 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2987 ugeth->rx_skbuff[j][i] = NULL;
2988
2989 ugeth->skb_currx[j] = 0;
2990 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2991 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2992 BD_STATUS_AND_LENGTH_SET(bd, R_I);
2993 BD_BUFFER_CLEAR(bd);
2994 bd += UCC_GETH_SIZE_OF_BD;
2995 }
2996 bd -= UCC_GETH_SIZE_OF_BD;
2997 BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
2998 }
2999
3000 /*
3001 * Global PRAM
3002 */
3003 /* Tx global PRAM */
3004 /* Allocate global tx parameter RAM page */
3005 ugeth->tx_glbl_pram_offset =
3006 qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
3007 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
3008 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
3009 ugeth_err
3010 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
3011 __FUNCTION__);
3012 ucc_geth_memclean(ugeth);
3013 return -ENOMEM;
3014 }
3015 ugeth->p_tx_glbl_pram =
3016 (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
3017 tx_glbl_pram_offset);
3018 /* Zero out p_tx_glbl_pram */
3019 memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
3020
3021 /* Fill global PRAM */
3022
3023 /* TQPTR */
3024 /* Size varies with number of Tx threads */
3025 ugeth->thread_dat_tx_offset =
3026 qe_muram_alloc(numThreadsTxNumerical *
3027 sizeof(ucc_geth_thread_data_tx_t) +
3028 32 * (numThreadsTxNumerical == 1),
3029 UCC_GETH_THREAD_DATA_ALIGNMENT);
3030 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
3031 ugeth_err
3032 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
3033 __FUNCTION__);
3034 ucc_geth_memclean(ugeth);
3035 return -ENOMEM;
3036 }
3037
3038 ugeth->p_thread_data_tx =
3039 (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
3040 thread_dat_tx_offset);
3041 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
3042
3043 /* vtagtable */
3044 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
3045 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
3046 ug_info->vtagtable[i]);
3047
3048 /* iphoffset */
3049 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
3050 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
3051
3052 /* SQPTR */
3053 /* Size varies with number of Tx queues */
3054 ugeth->send_q_mem_reg_offset =
3055 qe_muram_alloc(ug_info->numQueuesTx *
3056 sizeof(ucc_geth_send_queue_qd_t),
3057 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
3058 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
3059 ugeth_err
3060 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
3061 __FUNCTION__);
3062 ucc_geth_memclean(ugeth);
3063 return -ENOMEM;
3064 }
3065
3066 ugeth->p_send_q_mem_reg =
3067 (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
3068 send_q_mem_reg_offset);
3069 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
3070
3071 /* Setup the table */
3072 /* Assume BD rings are already established */
3073 for (i = 0; i < ug_info->numQueuesTx; i++) {
3074 endOfRing =
3075 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
3076 1) * UCC_GETH_SIZE_OF_BD;
3077 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3078 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3079 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
3080 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3081 last_bd_completed_address,
3082 (u32) virt_to_phys(endOfRing));
3083 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3084 MEM_PART_MURAM) {
3085 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3086 (u32) immrbar_virt_to_phys(ugeth->
3087 p_tx_bd_ring[i]));
3088 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3089 last_bd_completed_address,
3090 (u32) immrbar_virt_to_phys(endOfRing));
3091 }
3092 }
3093
3094 /* schedulerbasepointer */
3095
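	/* The QE Tx scheduler exists only for multi-queue operation; its
	   MURAM structure carries the inter-queue policy (strict priority,
	   per-queue weight factors, extra bandwidth), and the cpucount
	   register addresses are cached in p_cpucount[] for later use. */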
3096 if (ug_info->numQueuesTx > 1) {
3097 /* scheduler exists only if more than 1 tx queue */
3098 ugeth->scheduler_offset =
3099 qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
3100 UCC_GETH_SCHEDULER_ALIGNMENT);
3101 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
3102 ugeth_err
3103 ("%s: Can not allocate DPRAM memory for p_scheduler.",
3104 __FUNCTION__);
3105 ucc_geth_memclean(ugeth);
3106 return -ENOMEM;
3107 }
3108
3109 ugeth->p_scheduler =
3110 (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
3111 scheduler_offset);
3112 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
3113 ugeth->scheduler_offset);
3114 /* Zero out p_scheduler */
3115 memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
3116
3117 /* Set values in scheduler */
3118 out_be32(&ugeth->p_scheduler->mblinterval,
3119 ug_info->mblinterval);
3120 out_be16(&ugeth->p_scheduler->nortsrbytetime,
3121 ug_info->nortsrbytetime);
3122 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
3123 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
3124 ugeth->p_scheduler->txasap = ug_info->txasap;
3125 ugeth->p_scheduler->extrabw = ug_info->extrabw;
3126 for (i = 0; i < NUM_TX_QUEUES; i++)
3127 ugeth->p_scheduler->weightfactor[i] =
3128 ug_info->weightfactor[i];
3129
3130 /* Set pointers to cpucount registers in scheduler */
3131 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
3132 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
3133 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
3134 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
3135 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
3136 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
3137 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
3138 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
3139 }
3140
3141 /* schedulerbasepointer */
3142 /* TxRMON_PTR (statistics) */
3143 if (ug_info->
3144 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
3145 ugeth->tx_fw_statistics_pram_offset =
3146 qe_muram_alloc(sizeof
3147 (ucc_geth_tx_firmware_statistics_pram_t),
3148 UCC_GETH_TX_STATISTICS_ALIGNMENT);
3149 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
3150 ugeth_err
3151 ("%s: Can not allocate DPRAM memory for"
3152 " p_tx_fw_statistics_pram.", __FUNCTION__);
3153 ucc_geth_memclean(ugeth);
3154 return -ENOMEM;
3155 }
3156 ugeth->p_tx_fw_statistics_pram =
3157 (ucc_geth_tx_firmware_statistics_pram_t *)
3158 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
3159 /* Zero out p_tx_fw_statistics_pram */
3160 memset(ugeth->p_tx_fw_statistics_pram,
3161 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
3162 }
3163
3164 /* temoder */
3165 /* Already has speed set */
3166
3167 if (ug_info->numQueuesTx > 1)
3168 temoder |= TEMODER_SCHEDULER_ENABLE;
3169 if (ug_info->ipCheckSumGenerate)
3170 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
3171 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
3172 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
3173
3174 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
3175
3176 /* Function code register value to be used later */
3177 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
3178 /* Required for QE */
3179
3180 /* function code register */
3181 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
3182
3183 /* Rx global PRAM */
3184 /* Allocate global rx parameter RAM page */
3185 ugeth->rx_glbl_pram_offset =
3186 qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
3187 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
3188 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
3189 ugeth_err
3190 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
3191 __FUNCTION__);
3192 ucc_geth_memclean(ugeth);
3193 return -ENOMEM;
3194 }
3195 ugeth->p_rx_glbl_pram =
3196 (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
3197 rx_glbl_pram_offset);
3198 /* Zero out p_rx_glbl_pram */
3199 memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
3200
3201 /* Fill global PRAM */
3202
3203 /* RQPTR */
3204 /* Size varies with number of Rx threads */
3205 ugeth->thread_dat_rx_offset =
3206 qe_muram_alloc(numThreadsRxNumerical *
3207 sizeof(ucc_geth_thread_data_rx_t),
3208 UCC_GETH_THREAD_DATA_ALIGNMENT);
3209 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
3210 ugeth_err
3211 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
3212 __FUNCTION__);
3213 ucc_geth_memclean(ugeth);
3214 return -ENOMEM;
3215 }
3216
3217 ugeth->p_thread_data_rx =
3218 (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
3219 thread_dat_rx_offset);
3220 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
3221
3222 /* typeorlen */
3223 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
3224
3225 /* rxrmonbaseptr (statistics) */
3226 if (ug_info->
3227 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
3228 ugeth->rx_fw_statistics_pram_offset =
3229 qe_muram_alloc(sizeof
3230 (ucc_geth_rx_firmware_statistics_pram_t),
3231 UCC_GETH_RX_STATISTICS_ALIGNMENT);
3232 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
3233 ugeth_err
3234 ("%s: Can not allocate DPRAM memory for"
3235 " p_rx_fw_statistics_pram.", __FUNCTION__);
3236 ucc_geth_memclean(ugeth);
3237 return -ENOMEM;
3238 }
3239 ugeth->p_rx_fw_statistics_pram =
3240 (ucc_geth_rx_firmware_statistics_pram_t *)
3241 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
3242 /* Zero out p_rx_fw_statistics_pram */
3243 memset(ugeth->p_rx_fw_statistics_pram, 0,
3244 sizeof(ucc_geth_rx_firmware_statistics_pram_t));
3245 }
3246
3247 /* intCoalescingPtr */
3248
3249 /* Size varies with number of Rx queues */
3250 ugeth->rx_irq_coalescing_tbl_offset =
3251 qe_muram_alloc(ug_info->numQueuesRx *
3252 sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
3253 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3254 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
3255 ugeth_err
3256 ("%s: Can not allocate DPRAM memory for"
3257 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3258 ucc_geth_memclean(ugeth);
3259 return -ENOMEM;
3260 }
3261
3262 ugeth->p_rx_irq_coalescing_tbl =
3263 (ucc_geth_rx_interrupt_coalescing_table_t *)
3264 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3265 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3266 ugeth->rx_irq_coalescing_tbl_offset);
3267
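	/* Note: the running coalescing counter is seeded with the same
	   value as the maximum, so each queue starts with a full
	   coalescing window. */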
3268 /* Fill interrupt coalescing table */
3269 for (i = 0; i < ug_info->numQueuesRx; i++) {
3270 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3271 interruptcoalescingmaxvalue,
3272 ug_info->interruptcoalescingmaxvalue[i]);
3273 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3274 interruptcoalescingcounter,
3275 ug_info->interruptcoalescingmaxvalue[i]);
3276 }
3277
3278 /* MRBLR */
3279 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3280 &ugeth->p_rx_glbl_pram->mrblr);
3281 /* MFLR */
3282 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3283 /* MINFLR */
3284 init_min_frame_len(ug_info->minFrameLength,
3285 &ugeth->p_rx_glbl_pram->minflr,
3286 &ugeth->p_rx_glbl_pram->mrblr);
3287 /* MAXD1 */
3288 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3289 /* MAXD2 */
3290 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3291
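	/* The priority-to-queue tables pack eight 4-bit queue numbers per
	   32-bit word, entry i at bit offset 28 - 4 * i (highest priority
	   in the most significant nibble).  For example, l2qt[0] == 1 with
	   the rest zero would yield l2qt == 0x10000000. */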
3292 /* l2qt */
3293 l2qt = 0;
3294 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3295 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3296 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3297
3298 /* l3qt */
3299 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3300 l3qt = 0;
3301 for (i = 0; i < 8; i++)
3302 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3303 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
3304 }
3305
3306 /* vlantype */
3307 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3308
3309 /* vlantci */
3310 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3311
3312 /* ecamptr */
3313 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3314
3315 /* RBDQPTR */
3316 /* Size varies with number of Rx queues */
3317 ugeth->rx_bd_qs_tbl_offset =
3318 qe_muram_alloc(ug_info->numQueuesRx *
3319 (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3320 sizeof(ucc_geth_rx_prefetched_bds_t)),
3321 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3322 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3323 ugeth_err
3324 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3325 __FUNCTION__);
3326 ucc_geth_memclean(ugeth);
3327 return -ENOMEM;
3328 }
3329
3330 ugeth->p_rx_bd_qs_tbl =
3331 (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
3332 rx_bd_qs_tbl_offset);
3333 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3334 /* Zero out p_rx_bd_qs_tbl */
3335 memset(ugeth->p_rx_bd_qs_tbl,
3336 0,
3337 ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3338 sizeof(ucc_geth_rx_prefetched_bds_t)));
3339
3340 /* Setup the table */
3341 /* Assume BD rings are already established */
3342 for (i = 0; i < ug_info->numQueuesRx; i++) {
3343 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3344 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3345 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3346 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3347 MEM_PART_MURAM) {
3348 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3349 (u32) immrbar_virt_to_phys(ugeth->
3350 p_rx_bd_ring[i]));
3351 }
3352 /* rest of fields handled by QE */
3353 }
3354
3355 /* remoder */
3356 /* Already has speed set */
3357
3358 if (ugeth->rx_extended_features)
3359 remoder |= REMODER_RX_EXTENDED_FEATURES;
3360 if (ug_info->rxExtendedFiltering)
3361 remoder |= REMODER_RX_EXTENDED_FILTERING;
3362 if (ug_info->dynamicMaxFrameLength)
3363 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3364 if (ug_info->dynamicMinFrameLength)
3365 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3366 remoder |=
3367 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3368 remoder |=
3369 ug_info->
3370 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3371 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3372 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3373 if (ug_info->ipCheckSumCheck)
3374 remoder |= REMODER_IP_CHECKSUM_CHECK;
3375 if (ug_info->ipAddressAlignment)
3376 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3377 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3378
3379 /* Note that this function must be called */
3380 /* ONLY AFTER p_tx_fw_statistics_pram */
3381	/* and p_rx_fw_statistics_pram are allocated! */
3382 init_firmware_statistics_gathering_mode((ug_info->
3383 statisticsMode &
3384 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3385 (ug_info->statisticsMode &
3386 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3387 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3388 ugeth->tx_fw_statistics_pram_offset,
3389 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3390 ugeth->rx_fw_statistics_pram_offset,
3391 &ugeth->p_tx_glbl_pram->temoder,
3392 &ugeth->p_rx_glbl_pram->remoder);
3393
3394 /* function code register */
3395 ugeth->p_rx_glbl_pram->rstate = function_code;
3396
3397 /* initialize extended filtering */
3398 if (ug_info->rxExtendedFiltering) {
3399 if (!ug_info->extendedFilteringChainPointer) {
3400 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3401 __FUNCTION__);
3402 ucc_geth_memclean(ugeth);
3403 return -EINVAL;
3404 }
3405
3406 /* Allocate memory for extended filtering Mode Global
3407 Parameters */
3408 ugeth->exf_glbl_param_offset =
3409 qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
3410 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3411 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3412 ugeth_err
3413 ("%s: Can not allocate DPRAM memory for"
3414 " p_exf_glbl_param.", __FUNCTION__);
3415 ucc_geth_memclean(ugeth);
3416 return -ENOMEM;
3417 }
3418
3419 ugeth->p_exf_glbl_param =
3420 (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
3421 exf_glbl_param_offset);
3422 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3423 ugeth->exf_glbl_param_offset);
3424 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3425 (u32) ug_info->extendedFilteringChainPointer);
3426
3427 } else { /* initialize 82xx style address filtering */
3428
3429 /* Init individual address recognition registers to disabled */
3430
3431 for (j = 0; j < NUM_OF_PADDRS; j++)
3432 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3433
3434 /* Create CQs for hash tables */
3435 if (ug_info->maxGroupAddrInHash > 0) {
3436 INIT_LIST_HEAD(&ugeth->group_hash_q);
3437 }
3438 if (ug_info->maxIndAddrInHash > 0) {
3439 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3440 }
3441 p_82xx_addr_filt =
3442 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
3443 p_rx_glbl_pram->addressfiltering;
3444
3445 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3446 ENET_ADDR_TYPE_GROUP);
3447 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3448 ENET_ADDR_TYPE_INDIVIDUAL);
3449 }
3450
3451 /*
3452 * Initialize UCC at QE level
3453 */
3454
3455 command = QE_INIT_TX_RX;
3456
3457 /* Allocate shadow InitEnet command parameter structure.
3458 * This is needed because after the InitEnet command is executed,
3459 * the structure in DPRAM is released, because DPRAM is a premium
3460 * resource.
3461 * This shadow structure keeps a copy of what was done so that the
3462 * allocated resources can be released when the channel is freed.
3463 */
3464 if (!(ugeth->p_init_enet_param_shadow =
3465 (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
3466 GFP_KERNEL))) {
3467 ugeth_err
3468 ("%s: Can not allocate memory for"
3469 " p_UccInitEnetParamShadows.", __FUNCTION__);
3470 ucc_geth_memclean(ugeth);
3471 return -ENOMEM;
3472 }
3473 /* Zero out *p_init_enet_param_shadow */
3474 memset((char *)ugeth->p_init_enet_param_shadow,
3475 0, sizeof(ucc_geth_init_pram_t));
3476
3477 /* Fill shadow InitEnet command parameter structure */
3478
3479 ugeth->p_init_enet_param_shadow->resinit1 =
3480 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3481 ugeth->p_init_enet_param_shadow->resinit2 =
3482 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3483 ugeth->p_init_enet_param_shadow->resinit3 =
3484 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3485 ugeth->p_init_enet_param_shadow->resinit4 =
3486 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3487 ugeth->p_init_enet_param_shadow->resinit5 =
3488 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3489 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3490 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3491 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3492 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3493
3494 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3495 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3496 if ((ug_info->largestexternallookupkeysize !=
3497 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3498 && (ug_info->largestexternallookupkeysize !=
3499 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3500 && (ug_info->largestexternallookupkeysize !=
3501 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3502 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3503 __FUNCTION__);
3504 ucc_geth_memclean(ugeth);
3505 return -EINVAL;
3506 }
3507 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3508 ug_info->largestexternallookupkeysize;
3509 size = sizeof(ucc_geth_thread_rx_pram_t);
3510 if (ug_info->rxExtendedFiltering) {
3511 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3512 if (ug_info->largestexternallookupkeysize ==
3513 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3514 size +=
3515 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3516 if (ug_info->largestexternallookupkeysize ==
3517 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3518 size +=
3519 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3520 }
3521
	/* Rx needs one extra entry for the list terminator */
	ret_val = fill_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   rxthread[0]),
					 (u8) (numThreadsRxNumerical + 1),
					 size,
					 UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
					 ug_info->riscRx, 1);
	if (ret_val != 0) {
		ugeth_err("%s: Cannot fill p_init_enet_param_shadow.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return ret_val;
	}

	ugeth->p_init_enet_param_shadow->txglobal =
	    ugeth->tx_glbl_pram_offset | ug_info->riscTx;
	ret_val = fill_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   txthread[0]),
					 numThreadsTxNumerical,
					 sizeof(ucc_geth_thread_tx_pram_t),
					 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
					 ug_info->riscTx, 0);
	if (ret_val != 0) {
		ugeth_err("%s: Cannot fill p_init_enet_param_shadow.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return ret_val;
	}

	/* Load Rx bds with buffers */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
			ugeth_err("%s: Cannot fill Rx bds with buffers.",
				  __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return ret_val;
		}
	}

	/* Allocate InitEnet command parameter structure */
	init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
	if (IS_MURAM_ERR(init_enet_pram_offset)) {
		ugeth_err
		    ("%s: Cannot allocate DPRAM memory for p_init_enet_pram.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	p_init_enet_pram =
	    (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);

	/* Copy shadow InitEnet command parameter structure into PRAM */
	p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
	p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
	p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
	p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
	out_be16(&p_init_enet_pram->resinit5,
		 ugeth->p_init_enet_param_shadow->resinit5);
	p_init_enet_pram->largestexternallookupkeysize =
	    ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
	out_be32(&p_init_enet_pram->rgftgfrxglobal,
		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
		out_be32(&p_init_enet_pram->rxthread[i],
			 ugeth->p_init_enet_param_shadow->rxthread[i]);
	out_be32(&p_init_enet_pram->txglobal,
		 ugeth->p_init_enet_param_shadow->txglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
		out_be32(&p_init_enet_pram->txthread[i],
			 ugeth->p_init_enet_param_shadow->txthread[i]);

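	/*
	 * Hand the structure just built to the QE: the subblock code
	 * selects this UCC, and QE_INIT_TX_RX tells the microcode to
	 * bring up both directions from the parameter RAM at
	 * init_enet_pram_offset.
	 */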
	/* Issue QE command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);

	/* Free InitEnet command parameter */
	qe_muram_free(init_enet_pram_offset);

	return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	return &(ugeth->stats);
}

/* ucc_geth_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures and
 * starting over will fix the problem. */
static void ucc_geth_timeout(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ugeth->stats.tx_errors++;

	ugeth_dump_regs(ugeth);

	if (dev->flags & IFF_UP) {
		ucc_geth_stop(ugeth);
		ucc_geth_startup(ugeth);
	}

	netif_schedule(dev);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	u8 *bd;			/* BD pointer */
	u32 bd_status;
	u8 txQ = 0;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	spin_lock_irq(&ugeth->lock);

	ugeth->stats.tx_bytes += skb->len;

	/* Start from the next BD that should be filled */
	bd = ugeth->txBd[txQ];
	bd_status = BD_STATUS_AND_LENGTH(bd);
	/* Save the skb pointer so we can free it later */
	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	ugeth->skb_curtx[txQ] =
	    (ugeth->skb_curtx[txQ] +
	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

	/* set up the buffer descriptor */
	BD_BUFFER_SET(bd,
		      dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
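	/* Keep only the Wrap bit of the old status, then mark the BD
	 * Ready (T_R) for the QE, request an interrupt on completion
	 * (T_I) and flag it as the Last BD of the frame (T_L); the
	 * low-order bits carry the frame length. */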
	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;

	BD_STATUS_AND_LENGTH_SET(bd, bd_status);

	dev->trans_start = jiffies;

	/* Move to next BD in the ring */
	if (!(bd_status & T_W))
		bd += UCC_GETH_SIZE_OF_BD;
	else
		bd = ugeth->p_tx_bd_ring[txQ];
	ugeth->txBd[txQ] = bd;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (bd == ugeth->confBd[txQ]) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	if (ugeth->p_scheduler) {
		ugeth->cpucount[txQ]++;
		/* Indicate to QE that there are more Tx bds ready for
		   transmission by writing a running counter of the bd
		   count to the scheduler PRAM. */
		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
	}

	spin_unlock_irq(&ugeth->lock);

	return 0;
}

static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
{
	struct sk_buff *skb;
	u8 *bd;
	u16 length, howmany = 0;
	u32 bd_status;
	u8 *bdBuffer;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	spin_lock(&ugeth->lock);
	/* collect received buffers */
	bd = ugeth->rxBd[rxQ];

	bd_status = BD_STATUS_AND_LENGTH(bd);

	/* while there are received buffers, i.e. the BD's Empty bit (R_E)
	   is clear, and we are within the work limit */
	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
		bdBuffer = (u8 *) BD_BUFFER(bd);
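		/* The BD length includes the 4-byte Ethernet FCS, which
		   is not passed up the stack */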
		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];

		/* determine whether buffer is first, last, first and last
		   (single buffer frame) or middle (not first and not last) */
		if (!skb ||
		    (!(bd_status & (R_F | R_L))) ||
		    (bd_status & R_ERRORS_FATAL)) {
			ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
				   __FUNCTION__, __LINE__, (u32) skb);
			if (skb)
				dev_kfree_skb_any(skb);

			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
			ugeth->stats.rx_dropped++;
		} else {
			ugeth->stats.rx_packets++;
			howmany++;

			/* Prep the skb for the packet */
			skb_put(skb, length);

			/* Tell the skb what kind of packet this is */
			skb->protocol = eth_type_trans(skb, ugeth->dev);

			ugeth->stats.rx_bytes += length;
			/* Send the packet up the stack */
#ifdef CONFIG_UGETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif				/* CONFIG_UGETH_NAPI */
		}

		ugeth->dev->last_rx = jiffies;

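		/* Attach a fresh skb to this BD so the ring stays
		   populated; if none can be allocated, stop polling
		   this queue */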
		skb = get_new_skb(ugeth, bd);
		if (!skb) {
			ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
			spin_unlock(&ugeth->lock);
			ugeth->stats.rx_dropped++;
			break;
		}

		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;

		/* update to point at the next skb */
		ugeth->skb_currx[rxQ] =
		    (ugeth->skb_currx[rxQ] +
		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);

		if (bd_status & R_W)
			bd = ugeth->p_rx_bd_ring[rxQ];
		else
			bd += UCC_GETH_SIZE_OF_BD;

		bd_status = BD_STATUS_AND_LENGTH(bd);
	}

	ugeth->rxBd[rxQ] = bd;
	spin_unlock(&ugeth->lock);
	return howmany;
}

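/* Reclaim Tx BDs that the QE has released (T_R cleared), free the
 * associated socket buffers and advance the confirmation pointer;
 * called from the interrupt handler for every Tx queue with events
 * pending. */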
static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	u8 *bd;			/* BD pointer */
	u32 bd_status;

	/* Start from the next BD that should be confirmed */
	bd = ugeth->confBd[txQ];
	bd_status = BD_STATUS_AND_LENGTH(bd);

	/* Normal processing. */
	while ((bd_status & T_R) == 0) {
		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
			break;

		ugeth->stats.tx_packets++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(ugeth->
				  tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
		ugeth->skb_dirtytx[txQ] =
		    (ugeth->skb_dirtytx[txQ] +
		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W))
			ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
		else
			ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];

		/* Examine the next BD in the ring */
		bd = ugeth->confBd[txQ];
		bd_status = BD_STATUS_AND_LENGTH(bd);
	}
	return 0;
}

#ifdef CONFIG_UGETH_NAPI
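/* Old-style NAPI callback: consume up to min(*budget, dev->quota)
 * received frames, charge whatever was used to both counters, and
 * take the device off the poll list once the Rx ring is drained. */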
static int ucc_geth_poll(struct net_device *dev, int *budget)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	int howmany;
	int rx_work_limit = *budget;
	u8 rxQ = 0;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

	howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit >= 0)
		netif_rx_complete(dev);

	return (rx_work_limit < 0) ? 1 : 0;
}
#endif				/* CONFIG_UGETH_NAPI */

static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
					struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)info;
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	ucc_fast_private_t *uccf;
	ucc_geth_info_t *ug_info;
	register u32 ucce = 0;
	register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
	register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
	register u8 i;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	if (!ugeth)
		return IRQ_NONE;

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

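	/* Gather the enabled UCCE events (the register is
	 * write-one-to-clear), walk the per-queue Rx and Tx bits, and
	 * repeat until no enabled event is left pending. */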
	do {
		ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));

		/* clear event bits for next time */
		/* Side effect here is to mask ucce variable
		   for future processing below. */
		out_be32(uccf->p_ucce, ucce);	/* Clear with ones,
						   but only bits in UCCM */

		/* Tx confirmation is handled in ucc_geth_tx(), invoked
		   below for each queue with a pending event */

		for (i = 0; i < ug_info->numQueuesRx; i++) {
			if (ucce & bit_mask)
				ucc_geth_rx(ugeth, i,
					    (int)ugeth->ug_info->
					    bdRingLenRx[i]);
			ucce &= ~bit_mask;
			bit_mask <<= 1;
		}

		for (i = 0; i < ug_info->numQueuesTx; i++) {
			if (ucce & tx_mask)
				ucc_geth_tx(dev, i);
			ucce &= ~tx_mask;
			tx_mask <<= 1;
		}

		/* Exceptions */
		if (ucce & UCCE_BSY) {
			ugeth_vdbg("Got BUSY irq!!!!");
			ugeth->stats.rx_errors++;
			ucce &= ~UCCE_BSY;
		}
		if (ucce & UCCE_OTHER) {
			ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
				   ucce);
			ugeth->stats.rx_errors++;
			ucce = 0;	/* discard any remaining events */
		}
	} while (ucce);

	return IRQ_HANDLED;
}

static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	/* Clear the interrupt */
	mii_clear_phy_interrupt(ugeth->mii_info);

	/* Disable PHY interrupts */
	mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);

	/* Schedule the phy change */
	schedule_work(&ugeth->tq);

	return IRQ_HANDLED;
}

/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void ugeth_phy_change(void *data)
{
	struct net_device *dev = (struct net_device *)data;
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	ucc_geth_t *ug_regs;
	int result = 0;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_regs = ugeth->ug_regs;

	/* Delay to give the PHY a chance to change the
	 * register state */
	msleep(1);

	/* Update the link, speed, duplex */
	result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);

	/* Adjust the known status as long as the link
	 * isn't still coming up */
	if ((0 == result) || (ugeth->mii_info->link == 0))
		adjust_link(dev);

	/* Reenable interrupts, if needed */
	if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
		mii_configure_phy_interrupt(ugeth->mii_info,
					    MII_INTERRUPT_ENABLED);
}

/* Called every so often on systems that don't interrupt
 * the core for PHY changes */
static void ugeth_phy_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	schedule_work(&ugeth->tq);

	mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
}

/* Keep trying aneg for some time.
 * If, after UGETH_AN_TIMEOUT seconds, it has not
 * finished, we switch to forced.
 * Either way, once the process has completed, we either
 * request the interrupt, or switch the timer over to
 * using ugeth_phy_timer to check status */
static void ugeth_phy_startup_timer(unsigned long data)
{
	struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
	ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
	static int secondary = UGETH_AN_TIMEOUT;
	int result;

	/* Configure the Auto-negotiation */
	result = mii_info->phyinfo->config_aneg(mii_info);

	/* If autonegotiation failed to start, and
	 * we haven't timed out, reset the timer, and return */
	if (result && secondary--) {
		mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
		return;
	} else if (result) {
		/* Couldn't start autonegotiation.
		 * Try switching to forced */
		mii_info->autoneg = 0;
		result = mii_info->phyinfo->config_aneg(mii_info);

		/* Forcing failed!  Give up */
		if (result) {
			ugeth_err("%s: Forcing failed!", mii_info->dev->name);
			return;
		}
	}

	/* Kill the timer so it can be restarted */
	del_timer_sync(&ugeth->phy_info_timer);

	/* Grab the PHY interrupt, if necessary/possible */
	if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
		if (request_irq(ugeth->ug_info->phy_interrupt,
				phy_interrupt,
				SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
			ugeth_err("%s: Can't get IRQ %d (PHY)",
				  mii_info->dev->name,
				  ugeth->ug_info->phy_interrupt);
		} else {
			mii_configure_phy_interrupt(ugeth->mii_info,
						    MII_INTERRUPT_ENABLED);
			return;
		}
	}

	/* Start the timer again, this time in order to
	 * handle a change in status */
	init_timer(&ugeth->phy_info_timer);
	ugeth->phy_info_timer.function = &ugeth_phy_timer;
	ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
	mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	/* Test station address */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		ugeth_err("%s: Multicast address used for station address"
			  " - is this what you wanted?", __FUNCTION__);
		return -EINVAL;
	}

	err = ucc_geth_startup(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure net device, aborting.",
			  dev->name);
		return err;
	}

	err = adjust_enet_interface(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure net device, aborting.",
			  dev->name);
		return err;
	}

	/* Set MACSTNADDR1, MACSTNADDR2 */
	/* For more details see the hardware spec. */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	err = init_phy(dev);
	if (err) {
		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
		return err;
	}
#ifndef CONFIG_UGETH_NAPI
	err =
	    request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
			"UCC Geth", dev);
	if (err) {
		ugeth_err("%s: Cannot get IRQ for net device, aborting.",
			  dev->name);
		ucc_geth_stop(ugeth);
		return err;
	}
#endif				/* CONFIG_UGETH_NAPI */

	/* Set up the PHY change work queue */
	INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);

	init_timer(&ugeth->phy_info_timer);
	ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
	ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
	mod_timer(&ugeth->phy_info_timer, jiffies + HZ);

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
		ucc_geth_stop(ugeth);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ucc_geth_stop(ugeth);

	/* Shutdown the PHY */
	if (ugeth->mii_info->phyinfo->close)
		ugeth->mii_info->phyinfo->close(ugeth->mii_info);

	kfree(ugeth->mii_info);

	netif_stop_queue(dev);

	return 0;
}

struct ethtool_ops ucc_geth_ethtool_ops = {
	.get_settings = NULL,
	.get_drvinfo = NULL,
	.get_regs_len = NULL,
	.get_regs = NULL,
	.get_link = NULL,
	.get_coalesce = NULL,
	.set_coalesce = NULL,
	.get_ringparam = NULL,
	.set_ringparam = NULL,
	.get_strings = NULL,
	.get_stats_count = NULL,
	.get_ethtool_stats = NULL,
};

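/*
 * For reference, a minimal sketch of the platform data a board file
 * would supply for this driver.  The field names follow the usage in
 * ucc_geth_probe() below; the values are placeholders, not taken from
 * any real board:
 *
 *	static struct ucc_geth_platform_data ugeth0_pdata = {
 *		.phy_id		= 0,
 *		.phy_reg_addr	= 0,	(UCC register base)
 *		.rx_clock	= 0,	(board-specific QE clock routing)
 *		.tx_clock	= 0,
 *		.phy_interface	= 0,	(one of the enet interface values)
 *		.board_flags	= 0,	(e.g. FSL_UGETH_BRD_HAS_PHY_INTR)
 *		.phy_interrupt	= 0,
 *		.mac_addr	= { 0, 0, 0, 0, 0, 0 },
 *	};
 */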
static int ucc_geth_probe(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct ucc_geth_platform_data *ugeth_pdata;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	int err;
	static int mii_mng_configured = 0;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
	if (ugeth_pdata == NULL) {
		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
			  pdev->id);
		return -ENODEV;
	}

	ug_info = &ugeth_info[pdev->id];
	ug_info->uf_info.ucc_num = pdev->id;
	ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
	ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
	ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
	ug_info->uf_info.irq = platform_get_irq(pdev, 0);
	ug_info->phy_address = ugeth_pdata->phy_id;
	ug_info->enet_interface = ugeth_pdata->phy_interface;
	ug_info->board_flags = ugeth_pdata->board_flags;
	ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;

	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
	       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
	       ug_info->uf_info.irq);

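	/* The QE has a single MII management interface shared by all
	 * UCCs; route it through the first UCC probed so that MDIO is
	 * usable before the remaining devices come up. */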
	if (!mii_mng_configured) {
		ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
		mii_mng_configured = 1;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));

	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the UCC Geth register region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	dev->open = ucc_geth_open;
	dev->hard_start_xmit = ucc_geth_start_xmit;
	dev->tx_timeout = ucc_geth_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_UGETH_NAPI
	dev->poll = ucc_geth_poll;
	dev->weight = UCC_GETH_DEV_WEIGHT;
#endif				/* CONFIG_UGETH_NAPI */
	dev->stop = ucc_geth_close;
	dev->get_stats = ucc_geth_get_stats;
/*	dev->change_mtu = ucc_geth_change_mtu; */
	dev->mtu = 1500;
	dev->set_multicast_list = ucc_geth_set_multi;
	dev->ethtool_ops = &ucc_geth_ethtool_ops;

	/* Initialize the private data before the device becomes visible */
	ugeth->ug_info = ug_info;
	ugeth->dev = dev;
	memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);

	err = register_netdev(dev);
	if (err) {
		ugeth_err("%s: Cannot register net device, aborting.",
			  dev->name);
		free_netdev(dev);
		return err;
	}

	return 0;
}

static int ucc_geth_remove(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	dev_set_drvdata(device, NULL);
	unregister_netdev(dev);
	ucc_geth_memclean(ugeth);
	free_netdev(dev);

	return 0;
}

/* Structure for a device driver */
static struct device_driver ucc_geth_driver = {
	.name = DRV_NAME,
	.bus = &platform_bus_type,
	.probe = ucc_geth_probe,
	.remove = ucc_geth_remove,
};

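/* Every UCC starts from a copy of ugeth_primary_info; probe() then
 * overrides the per-device fields from the platform data. */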
static int __init ucc_geth_init(void)
{
	int i;

	printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	return driver_register(&ucc_geth_driver);
}

static void __exit ucc_geth_exit(void)
{
	driver_unregister(&ucc_geth_driver);
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");