Li Yang | 9865853 | 2006-10-03 23:10:46 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. |
| 3 | * |
| 4 | * Authors: Shlomi Gridish <gridish@freescale.com> |
| 5 | * Li Yang <leoli@freescale.com> |
| 6 | * |
| 7 | * Description: |
| 8 | * QE UCC Slow API Set - UCC Slow specific routines implementations. |
| 9 | * |
| 10 | * This program is free software; you can redistribute it and/or modify it |
| 11 | * under the terms of the GNU General Public License as published by the |
| 12 | * Free Software Foundation; either version 2 of the License, or (at your |
| 13 | * option) any later version. |
| 14 | */ |
| 15 | #include <linux/kernel.h> |
| 16 | #include <linux/init.h> |
| 17 | #include <linux/errno.h> |
| 18 | #include <linux/slab.h> |
| 19 | #include <linux/stddef.h> |
| 20 | #include <linux/interrupt.h> |
| 21 | |
| 22 | #include <asm/irq.h> |
| 23 | #include <asm/io.h> |
| 24 | #include <asm/immap_qe.h> |
| 25 | #include <asm/qe.h> |
| 26 | |
| 27 | #include <asm/ucc.h> |
| 28 | #include <asm/ucc_slow.h> |
| 29 | |
/* Logging helpers: emit one line at the given kernel log level. */
#define uccs_printk(level, format, arg...) \
	printk(level format "\n", ## arg)

#define uccs_dbg(format, arg...) \
	uccs_printk(KERN_DEBUG , format , ## arg)
#define uccs_err(format, arg...) \
	uccs_printk(KERN_ERR , format , ## arg)
#define uccs_info(format, arg...) \
	uccs_printk(KERN_INFO , format , ## arg)
#define uccs_warn(format, arg...) \
	uccs_printk(KERN_WARNING , format , ## arg)

/* uccs_vdbg() compiles to nothing unless UCCS_VERBOSE_DEBUG is defined. */
#ifdef UCCS_VERBOSE_DEBUG
#define uccs_vdbg uccs_dbg
#else
#define uccs_vdbg(fmt, args...) do { } while (0)
#endif				/* UCCS_VERBOSE_DEBUG */
| 47 | |
| 48 | u32 ucc_slow_get_qe_cr_subblock(int uccs_num) |
| 49 | { |
| 50 | switch (uccs_num) { |
| 51 | case 0: return QE_CR_SUBBLOCK_UCCSLOW1; |
| 52 | case 1: return QE_CR_SUBBLOCK_UCCSLOW2; |
| 53 | case 2: return QE_CR_SUBBLOCK_UCCSLOW3; |
| 54 | case 3: return QE_CR_SUBBLOCK_UCCSLOW4; |
| 55 | case 4: return QE_CR_SUBBLOCK_UCCSLOW5; |
| 56 | case 5: return QE_CR_SUBBLOCK_UCCSLOW6; |
| 57 | case 6: return QE_CR_SUBBLOCK_UCCSLOW7; |
| 58 | case 7: return QE_CR_SUBBLOCK_UCCSLOW8; |
| 59 | default: return QE_CR_SUBBLOCK_INVALID; |
| 60 | } |
| 61 | } |
| 62 | |
| 63 | void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs) |
| 64 | { |
| 65 | out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD); |
| 66 | } |
| 67 | |
| 68 | void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs) |
| 69 | { |
| 70 | struct ucc_slow_info *us_info = uccs->us_info; |
| 71 | u32 id; |
| 72 | |
| 73 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); |
| 74 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, id, |
| 75 | QE_CR_PROTOCOL_UNSPECIFIED, 0); |
| 76 | } |
| 77 | |
| 78 | void ucc_slow_stop_tx(struct ucc_slow_private * uccs) |
| 79 | { |
| 80 | struct ucc_slow_info *us_info = uccs->us_info; |
| 81 | u32 id; |
| 82 | |
| 83 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); |
| 84 | qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); |
| 85 | } |
| 86 | |
| 87 | void ucc_slow_restart_tx(struct ucc_slow_private * uccs) |
| 88 | { |
| 89 | struct ucc_slow_info *us_info = uccs->us_info; |
| 90 | u32 id; |
| 91 | |
| 92 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); |
| 93 | qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); |
| 94 | } |
| 95 | |
| 96 | void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) |
| 97 | { |
| 98 | struct ucc_slow *us_regs; |
| 99 | u32 gumr_l; |
| 100 | |
| 101 | us_regs = uccs->us_regs; |
| 102 | |
| 103 | /* Enable reception and/or transmission on this UCC. */ |
| 104 | gumr_l = in_be32(&us_regs->gumr_l); |
| 105 | if (mode & COMM_DIR_TX) { |
| 106 | gumr_l |= UCC_SLOW_GUMR_L_ENT; |
| 107 | uccs->enabled_tx = 1; |
| 108 | } |
| 109 | if (mode & COMM_DIR_RX) { |
| 110 | gumr_l |= UCC_SLOW_GUMR_L_ENR; |
| 111 | uccs->enabled_rx = 1; |
| 112 | } |
| 113 | out_be32(&us_regs->gumr_l, gumr_l); |
| 114 | } |
| 115 | |
| 116 | void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) |
| 117 | { |
| 118 | struct ucc_slow *us_regs; |
| 119 | u32 gumr_l; |
| 120 | |
| 121 | us_regs = uccs->us_regs; |
| 122 | |
| 123 | /* Disable reception and/or transmission on this UCC. */ |
| 124 | gumr_l = in_be32(&us_regs->gumr_l); |
| 125 | if (mode & COMM_DIR_TX) { |
| 126 | gumr_l &= ~UCC_SLOW_GUMR_L_ENT; |
| 127 | uccs->enabled_tx = 0; |
| 128 | } |
| 129 | if (mode & COMM_DIR_RX) { |
| 130 | gumr_l &= ~UCC_SLOW_GUMR_L_ENR; |
| 131 | uccs->enabled_rx = 0; |
| 132 | } |
| 133 | out_be32(&us_regs->gumr_l, gumr_l); |
| 134 | } |
| 135 | |
| 136 | int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret) |
| 137 | { |
| 138 | u32 i; |
| 139 | struct ucc_slow *us_regs; |
| 140 | u32 gumr; |
| 141 | u8 function_code = 0; |
| 142 | u8 *bd; |
| 143 | struct ucc_slow_private *uccs; |
| 144 | u32 id; |
| 145 | u32 command; |
| 146 | int ret; |
| 147 | |
| 148 | uccs_vdbg("%s: IN", __FUNCTION__); |
| 149 | |
| 150 | if (!us_info) |
| 151 | return -EINVAL; |
| 152 | |
| 153 | /* check if the UCC port number is in range. */ |
| 154 | if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) { |
Timur Tabi | aa7a32c | 2006-10-18 17:27:32 -0500 | [diff] [blame] | 155 | uccs_err("ucc_slow_init: Illegal UCC number!"); |
Li Yang | 9865853 | 2006-10-03 23:10:46 -0500 | [diff] [blame] | 156 | return -EINVAL; |
| 157 | } |
| 158 | |
| 159 | /* |
| 160 | * Set mrblr |
| 161 | * Check that 'max_rx_buf_length' is properly aligned (4), unless |
| 162 | * rfw is 1, meaning that QE accepts one byte at a time, unlike normal |
| 163 | * case when QE accepts 32 bits at a time. |
| 164 | */ |
| 165 | if ((!us_info->rfw) && |
| 166 | (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { |
| 167 | uccs_err("max_rx_buf_length not aligned."); |
| 168 | return -EINVAL; |
| 169 | } |
| 170 | |
| 171 | uccs = (struct ucc_slow_private *) |
| 172 | kmalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); |
| 173 | if (!uccs) { |
| 174 | uccs_err |
| 175 | ("ucc_slow_init: No memory for UCC slow data structure!"); |
| 176 | return -ENOMEM; |
| 177 | } |
| 178 | memset(uccs, 0, sizeof(struct ucc_slow_private)); |
| 179 | |
| 180 | /* Fill slow UCC structure */ |
| 181 | uccs->us_info = us_info; |
| 182 | uccs->saved_uccm = 0; |
| 183 | uccs->p_rx_frame = 0; |
| 184 | uccs->us_regs = us_info->us_regs; |
| 185 | us_regs = uccs->us_regs; |
| 186 | uccs->p_ucce = (u16 *) & (us_regs->ucce); |
| 187 | uccs->p_uccm = (u16 *) & (us_regs->uccm); |
| 188 | #ifdef STATISTICS |
| 189 | uccs->rx_frames = 0; |
| 190 | uccs->tx_frames = 0; |
| 191 | uccs->rx_discarded = 0; |
| 192 | #endif /* STATISTICS */ |
| 193 | |
| 194 | /* Get PRAM base */ |
| 195 | uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE, |
| 196 | ALIGNMENT_OF_UCC_SLOW_PRAM); |
| 197 | if (IS_MURAM_ERR(uccs->us_pram_offset)) { |
| 198 | uccs_err |
| 199 | ("ucc_slow_init: Can not allocate MURAM memory " |
| 200 | "for Slow UCC."); |
| 201 | ucc_slow_free(uccs); |
| 202 | return -ENOMEM; |
| 203 | } |
| 204 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); |
| 205 | qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED, |
| 206 | (u32) uccs->us_pram_offset); |
| 207 | |
| 208 | uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); |
| 209 | |
| 210 | /* Init Guemr register */ |
| 211 | if ((ret = ucc_init_guemr((struct ucc_common *) (us_info->us_regs)))) { |
| 212 | uccs_err("ucc_slow_init: Could not init the guemr register."); |
| 213 | ucc_slow_free(uccs); |
| 214 | return ret; |
| 215 | } |
| 216 | |
| 217 | /* Set UCC to slow type */ |
| 218 | if ((ret = ucc_set_type(us_info->ucc_num, |
| 219 | (struct ucc_common *) (us_info->us_regs), |
| 220 | UCC_SPEED_TYPE_SLOW))) { |
| 221 | uccs_err("ucc_slow_init: Could not init the guemr register."); |
| 222 | ucc_slow_free(uccs); |
| 223 | return ret; |
| 224 | } |
| 225 | |
| 226 | out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); |
| 227 | |
| 228 | INIT_LIST_HEAD(&uccs->confQ); |
| 229 | |
| 230 | /* Allocate BDs. */ |
| 231 | uccs->rx_base_offset = |
| 232 | qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), |
| 233 | QE_ALIGNMENT_OF_BD); |
| 234 | if (IS_MURAM_ERR(uccs->rx_base_offset)) { |
| 235 | uccs_err("ucc_slow_init: No memory for Rx BD's."); |
| 236 | uccs->rx_base_offset = 0; |
| 237 | ucc_slow_free(uccs); |
| 238 | return -ENOMEM; |
| 239 | } |
| 240 | |
| 241 | uccs->tx_base_offset = |
| 242 | qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), |
| 243 | QE_ALIGNMENT_OF_BD); |
| 244 | if (IS_MURAM_ERR(uccs->tx_base_offset)) { |
| 245 | uccs_err("ucc_slow_init: No memory for Tx BD's."); |
| 246 | uccs->tx_base_offset = 0; |
| 247 | ucc_slow_free(uccs); |
| 248 | return -ENOMEM; |
| 249 | } |
| 250 | |
| 251 | /* Init Tx bds */ |
| 252 | bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); |
| 253 | for (i = 0; i < us_info->tx_bd_ring_len; i++) { |
| 254 | /* clear bd buffer */ |
| 255 | out_be32(&(((struct qe_bd *)bd)->buf), 0); |
| 256 | /* set bd status and length */ |
| 257 | out_be32((u32*)bd, 0); |
| 258 | bd += sizeof(struct qe_bd); |
| 259 | } |
| 260 | bd -= sizeof(struct qe_bd); |
| 261 | /* set bd status and length */ |
| 262 | out_be32((u32*)bd, T_W); /* for last BD set Wrap bit */ |
| 263 | |
| 264 | /* Init Rx bds */ |
| 265 | bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); |
| 266 | for (i = 0; i < us_info->rx_bd_ring_len; i++) { |
| 267 | /* set bd status and length */ |
| 268 | out_be32((u32*)bd, 0); |
| 269 | /* clear bd buffer */ |
| 270 | out_be32(&(((struct qe_bd *)bd)->buf), 0); |
| 271 | bd += sizeof(struct qe_bd); |
| 272 | } |
| 273 | bd -= sizeof(struct qe_bd); |
| 274 | /* set bd status and length */ |
| 275 | out_be32((u32*)bd, R_W); /* for last BD set Wrap bit */ |
| 276 | |
| 277 | /* Set GUMR (For more details see the hardware spec.). */ |
| 278 | /* gumr_h */ |
| 279 | gumr = 0; |
| 280 | gumr |= us_info->tcrc; |
| 281 | if (us_info->cdp) |
| 282 | gumr |= UCC_SLOW_GUMR_H_CDP; |
| 283 | if (us_info->ctsp) |
| 284 | gumr |= UCC_SLOW_GUMR_H_CTSP; |
| 285 | if (us_info->cds) |
| 286 | gumr |= UCC_SLOW_GUMR_H_CDS; |
| 287 | if (us_info->ctss) |
| 288 | gumr |= UCC_SLOW_GUMR_H_CTSS; |
| 289 | if (us_info->tfl) |
| 290 | gumr |= UCC_SLOW_GUMR_H_TFL; |
| 291 | if (us_info->rfw) |
| 292 | gumr |= UCC_SLOW_GUMR_H_RFW; |
| 293 | if (us_info->txsy) |
| 294 | gumr |= UCC_SLOW_GUMR_H_TXSY; |
| 295 | if (us_info->rtsm) |
| 296 | gumr |= UCC_SLOW_GUMR_H_RTSM; |
| 297 | out_be32(&us_regs->gumr_h, gumr); |
| 298 | |
| 299 | /* gumr_l */ |
| 300 | gumr = 0; |
| 301 | if (us_info->tci) |
| 302 | gumr |= UCC_SLOW_GUMR_L_TCI; |
| 303 | if (us_info->rinv) |
| 304 | gumr |= UCC_SLOW_GUMR_L_RINV; |
| 305 | if (us_info->tinv) |
| 306 | gumr |= UCC_SLOW_GUMR_L_TINV; |
| 307 | if (us_info->tend) |
| 308 | gumr |= UCC_SLOW_GUMR_L_TEND; |
| 309 | gumr |= us_info->tdcr; |
| 310 | gumr |= us_info->rdcr; |
| 311 | gumr |= us_info->tenc; |
| 312 | gumr |= us_info->renc; |
| 313 | gumr |= us_info->diag; |
| 314 | gumr |= us_info->mode; |
| 315 | out_be32(&us_regs->gumr_l, gumr); |
| 316 | |
| 317 | /* Function code registers */ |
| 318 | /* function_code has initial value 0 */ |
| 319 | |
| 320 | /* if the data is in cachable memory, the 'global' */ |
| 321 | /* in the function code should be set. */ |
| 322 | function_code |= us_info->data_mem_part; |
| 323 | function_code |= QE_BMR_BYTE_ORDER_BO_MOT; /* Required for QE */ |
| 324 | uccs->us_pram->tfcr = function_code; |
| 325 | uccs->us_pram->rfcr = function_code; |
| 326 | |
| 327 | /* rbase, tbase are offsets from MURAM base */ |
| 328 | out_be16(&uccs->us_pram->rbase, uccs->us_pram_offset); |
| 329 | out_be16(&uccs->us_pram->tbase, uccs->us_pram_offset); |
| 330 | |
| 331 | /* Mux clocking */ |
| 332 | /* Grant Support */ |
| 333 | ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support); |
| 334 | /* Breakpoint Support */ |
| 335 | ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support); |
| 336 | /* Set Tsa or NMSI mode. */ |
| 337 | ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa); |
| 338 | /* If NMSI (not Tsa), set Tx and Rx clock. */ |
| 339 | if (!us_info->tsa) { |
| 340 | /* Rx clock routing */ |
| 341 | if (ucc_set_qe_mux_rxtx |
| 342 | (us_info->ucc_num, us_info->rx_clock, COMM_DIR_RX)) { |
| 343 | uccs_err |
| 344 | ("ucc_slow_init: Illegal value for parameter" |
| 345 | " 'RxClock'."); |
| 346 | ucc_slow_free(uccs); |
| 347 | return -EINVAL; |
| 348 | } |
| 349 | /* Tx clock routing */ |
| 350 | if (ucc_set_qe_mux_rxtx(us_info->ucc_num, |
| 351 | us_info->tx_clock, COMM_DIR_TX)) { |
| 352 | uccs_err |
| 353 | ("ucc_slow_init: Illegal value for parameter " |
| 354 | "'TxClock'."); |
| 355 | ucc_slow_free(uccs); |
| 356 | return -EINVAL; |
| 357 | } |
| 358 | } |
| 359 | |
| 360 | /* |
| 361 | * INTERRUPTS |
| 362 | */ |
| 363 | /* Set interrupt mask register at UCC level. */ |
| 364 | out_be16(&us_regs->uccm, us_info->uccm_mask); |
| 365 | |
| 366 | /* First, clear anything pending at UCC level, */ |
| 367 | /* otherwise, old garbage may come through */ |
| 368 | /* as soon as the dam is opened. */ |
| 369 | |
| 370 | /* Writing '1' clears */ |
| 371 | out_be16(&us_regs->ucce, 0xffff); |
| 372 | |
| 373 | /* Issue QE Init command */ |
| 374 | if (us_info->init_tx && us_info->init_rx) |
| 375 | command = QE_INIT_TX_RX; |
| 376 | else if (us_info->init_tx) |
| 377 | command = QE_INIT_TX; |
| 378 | else |
| 379 | command = QE_INIT_RX; /* We know at least one is TRUE */ |
| 380 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); |
| 381 | qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); |
| 382 | |
| 383 | *uccs_ret = uccs; |
| 384 | return 0; |
| 385 | } |
| 386 | |
| 387 | void ucc_slow_free(struct ucc_slow_private * uccs) |
| 388 | { |
| 389 | if (!uccs) |
| 390 | return; |
| 391 | |
| 392 | if (uccs->rx_base_offset) |
| 393 | qe_muram_free(uccs->rx_base_offset); |
| 394 | |
| 395 | if (uccs->tx_base_offset) |
| 396 | qe_muram_free(uccs->tx_base_offset); |
| 397 | |
| 398 | if (uccs->us_pram) { |
| 399 | qe_muram_free(uccs->us_pram_offset); |
| 400 | uccs->us_pram = NULL; |
| 401 | } |
| 402 | |
| 403 | kfree(uccs); |
| 404 | } |