/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors:	Shlomi Gridish <gridish@freescale.com>
 *		Li Yang <leoli@freescale.com>
 * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
 *
 * Description:
 * General-purpose functions for the global management of the
 * QUICC Engine (QE).
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/prom.h>
#include <asm/rheap.h>

static void qe_snums_init(void);
static void qe_muram_init(void);
static int qe_sdma_init(void);

static DEFINE_SPINLOCK(qe_lock);

/* QE snum state */
enum qe_snum_state {
	QE_SNUM_STATE_USED,
	QE_SNUM_STATE_FREE
};

/* QE snum */
struct qe_snum {
	u8 num;
	enum qe_snum_state state;
};

/* We allocate this here because it is used almost exclusively for
 * the communication processor devices.
 */
struct qe_immap *qe_immr = NULL;
EXPORT_SYMBOL(qe_immr);

static struct qe_snum snums[QE_NUM_OF_SNUM];	/* Dynamically allocated SNUMs */

static phys_addr_t qebase = -1;

phys_addr_t get_qe_base(void)
{
	struct device_node *qe;

	if (qebase != -1)
		return qebase;

	qe = of_find_node_by_type(NULL, "qe");
	if (qe) {
		unsigned int size;
		const void *prop = get_property(qe, "reg", &size);

		qebase = of_translate_address(qe, prop);
		of_node_put(qe);
	}

	return qebase;
}

EXPORT_SYMBOL(get_qe_base);

void qe_reset(void)
{
	if (qe_immr == NULL)
		qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);

	qe_snums_init();

	qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Reclaim the MURAM memory for our use. */
	qe_muram_init();

	if (qe_sdma_init())
		panic("sdma init failed!");
}

int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
{
	unsigned long flags;
	u8 mcn_shift = 0, dev_shift = 0;

	spin_lock_irqsave(&qe_lock, flags);
	if (cmd == QE_RESET) {
		out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
	} else {
		if (cmd == QE_ASSIGN_PAGE) {
			/* Here device is the SNUM, not the sub-block */
			dev_shift = QE_CR_SNUM_SHIFT;
		} else if (cmd == QE_ASSIGN_RISC) {
			/* Here device is the SNUM, and mcn_protocol is
			 * the e_QeCmdRiscAssignment value */
			dev_shift = QE_CR_SNUM_SHIFT;
			mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
		} else {
			if (device == QE_CR_SUBBLOCK_USB)
				mcn_shift = QE_CR_MCN_USB_SHIFT;
			else
				mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
		}

		out_be32(&qe_immr->cp.cecdr, cmd_input);
		out_be32(&qe_immr->cp.cecr,
			 (cmd | QE_CR_FLG | ((u32) device << dev_shift) |
			  (u32) mcn_protocol << mcn_shift));
	}

	/* wait for the QE_CR_FLG to clear */
	while (in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
		cpu_relax();
	spin_unlock_irqrestore(&qe_lock, flags);

	return 0;
}
EXPORT_SYMBOL(qe_issue_cmd);
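
/*
 * Usage sketch (illustrative, not taken from an in-tree driver): once a
 * UCC's parameter RAM has been set up, the owning driver kicks the QE with
 * a command aimed at that UCC's sub-block.  QE_CR_SUBBLOCK_UCCSLOW1 below
 * is only an example value; real callers derive the sub-block code from
 * the UCC they actually own.
 *
 *	qe_issue_cmd(QE_INIT_TX_RX, QE_CR_SUBBLOCK_UCCSLOW1,
 *		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
 */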

/* Set a baud rate generator. This needs lots of work. There are
 * 16 BRGs, which can be connected to the QE channels or output
 * as clocks. The BRGs are in two different blocks of internal
 * memory mapped space.
 * The baud rate clock is the system clock divided by something.
 * It was set up long ago during the initial boot phase and is
 * given to us.
 * Baud rate clocks are zero-based in the driver code (as that maps
 * to port numbers). Documentation uses 1-based numbering.
 */
static unsigned int brg_clk = 0;

unsigned int get_brg_clk(void)
{
	struct device_node *qe;

	if (brg_clk)
		return brg_clk;

	qe = of_find_node_by_type(NULL, "qe");
	if (qe) {
		unsigned int size;
		const u32 *prop = get_property(qe, "brg-frequency", &size);

		brg_clk = *prop;
		of_node_put(qe);
	}

	return brg_clk;
}

/* This function is used by UARTs, or anything else that uses a 16x
 * oversampled clock.
 */
void qe_setbrg(u32 brg, u32 rate)
{
	volatile u32 *bp;
	u32 divisor, tempval;
	int div16 = 0;

	bp = &qe_immr->brg.brgc[brg];

	divisor = get_brg_clk() / rate;
	if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
		div16 = 1;
		divisor /= 16;
	}

	tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
	if (div16)
		tempval |= QE_BRGC_DIV16;

	out_be32(bp, tempval);
}
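
/*
 * Worked example (with an assumed BRG input clock): if get_brg_clk()
 * reports 100000000 (100 MHz) and a UART wants 115200 baud with 16x
 * oversampling, the caller asks for rate = 115200 * 16 = 1843200.  Then
 * divisor = 100000000 / 1843200 = 54, which fits without the divide-by-16
 * prescaler, so BRGC is programmed with (54 - 1) in the divisor field and
 * only QE_BRGC_ENABLE set.
 */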

/* Initialize SNUMs (thread serial numbers) according to
 * QE Module Control chapter, SNUM table
 */
static void qe_snums_init(void)
{
	int i;
	static const u8 snum_init[] = {
		0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
		0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
		0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
		0xD8, 0xD9, 0xE8, 0xE9,
	};

	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
		snums[i].num = snum_init[i];
		snums[i].state = QE_SNUM_STATE_FREE;
	}
}

int qe_get_snum(void)
{
	unsigned long flags;
	int snum = -EBUSY;
	int i;

	spin_lock_irqsave(&qe_lock, flags);
	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
		if (snums[i].state == QE_SNUM_STATE_FREE) {
			snums[i].state = QE_SNUM_STATE_USED;
			snum = snums[i].num;
			break;
		}
	}
	spin_unlock_irqrestore(&qe_lock, flags);

	return snum;
}
EXPORT_SYMBOL(qe_get_snum);

void qe_put_snum(u8 snum)
{
	int i;

	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
		if (snums[i].num == snum) {
			snums[i].state = QE_SNUM_STATE_FREE;
			break;
		}
	}
}
EXPORT_SYMBOL(qe_put_snum);
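
/*
 * Usage sketch (illustrative): a driver that needs a QE thread grabs a
 * serial number before configuring the thread and returns it when the
 * thread is torn down.
 *
 *	int snum = qe_get_snum();
 *
 *	if (snum < 0)
 *		return snum;	(no free SNUM available: -EBUSY)
 *	...
 *	qe_put_snum((u8) snum);
 */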

static int qe_sdma_init(void)
{
	struct sdma *sdma = &qe_immr->sdma;
	u32 sdma_buf_offset;

	if (!sdma)
		return -ENODEV;

	/* allocate 2 internal temporary buffers (512 bytes each) for
	 * the SDMA */
	sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
	if (IS_MURAM_ERR(sdma_buf_offset))
		return -ENOMEM;

	out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
	out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
			       (0x1 << QE_SDMR_CEN_SHIFT)));

	return 0;
}

/*
 * muram_alloc / muram_free bits.
 */
static DEFINE_SPINLOCK(qe_muram_lock);

/* 16 blocks should be enough to satisfy all requests
 * until the memory subsystem goes up... */
static rh_block_t qe_boot_muram_rh_block[16];
static rh_info_t qe_muram_info;

static void qe_muram_init(void)
{
	struct device_node *np;
	u32 address;
	u64 size;
	unsigned int flags;

	/* initialize the info header */
	rh_init(&qe_muram_info, 1,
		sizeof(qe_boot_muram_rh_block) /
		sizeof(qe_boot_muram_rh_block[0]), qe_boot_muram_rh_block);

	/* Attach the usable MURAM area */
	/* XXX: This is a subset of the available MURAM. It
	 * varies with the processor and the microcode patches activated.
	 */
	if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
		address = *of_get_address(np, 0, &size, &flags);
		of_node_put(np);
		rh_attach_region(&qe_muram_info,
				 (void *)address, (int)size);
	}
}
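
/*
 * Note: "data-only" is the device-tree node describing the part of MURAM
 * that is free for data use (buffer descriptors, parameter RAM) rather
 * than being claimed by microcode.  Its "reg" property supplies the offset
 * and length attached to the rheap above; the actual values are board and
 * firmware specific.
 */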

/* This function returns a byte offset into the MURAM area.
 */
u32 qe_muram_alloc(u32 size, u32 align)
{
	void *start;
	unsigned long flags;

	spin_lock_irqsave(&qe_muram_lock, flags);
	start = rh_alloc_align(&qe_muram_info, size, align, "QE");
	spin_unlock_irqrestore(&qe_muram_lock, flags);

	return (u32) start;
}
EXPORT_SYMBOL(qe_muram_alloc);

int qe_muram_free(u32 offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&qe_muram_lock, flags);
	ret = rh_free(&qe_muram_info, (void *)offset);
	spin_unlock_irqrestore(&qe_muram_lock, flags);

	return ret;
}
EXPORT_SYMBOL(qe_muram_free);

/* not sure if this is ever needed */
u32 qe_muram_alloc_fixed(u32 offset, u32 size)
{
	void *start;
	unsigned long flags;

	spin_lock_irqsave(&qe_muram_lock, flags);
	start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
	spin_unlock_irqrestore(&qe_muram_lock, flags);

	return (u32) start;
}
EXPORT_SYMBOL(qe_muram_alloc_fixed);

void qe_muram_dump(void)
{
	rh_dump(&qe_muram_info);
}
EXPORT_SYMBOL(qe_muram_dump);

void *qe_muram_addr(u32 offset)
{
	return (void *)&qe_immr->muram[offset];
}
EXPORT_SYMBOL(qe_muram_addr);
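
/*
 * Usage sketch (illustrative): MURAM is handed out as byte offsets, and
 * qe_muram_addr() converts an offset into a CPU-visible pointer.  The size
 * and alignment below are assumed example values.
 *
 *	void *pram;
 *	u32 off;
 *
 *	off = qe_muram_alloc(64, 64);
 *	if (IS_MURAM_ERR(off))
 *		return -ENOMEM;
 *	pram = qe_muram_addr(off);
 *	...
 *	qe_muram_free(off);
 */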