/*
 * Freescale MPC85xx/MPC86xx RapidIO support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>

#undef DEBUG_PW         /* Port-Write debugging */

/* RapidIO IRQs, read from the OF device tree */
#define IRQ_RIO_BELL(m)         (((struct rio_priv *)(m->priv))->bellirq)
#define IRQ_RIO_TX(m)           (((struct rio_priv *)(m->priv))->txirq)
#define IRQ_RIO_RX(m)           (((struct rio_priv *)(m->priv))->rxirq)
#define IRQ_RIO_PW(m)           (((struct rio_priv *)(m->priv))->pwirq)

#define RIO_ATMU_REGS_OFFSET    0x10c00
#define RIO_P_MSG_REGS_OFFSET   0x11000
#define RIO_S_MSG_REGS_OFFSET   0x13000
#define RIO_ESCSR               0x158
#define RIO_CCSR                0x15c
#define RIO_LTLEDCSR            0x0608
#define RIO_LTLEDCSR_IER        0x80000000
#define RIO_LTLEDCSR_PRT        0x01000000
#define RIO_LTLEECSR            0x060c
#define RIO_EPWISR              0x10010
#define RIO_ISR_AACR            0x10120
#define RIO_ISR_AACR_AA         0x1     /* Accept All ID */
#define RIO_MAINT_WIN_SIZE      0x400000
#define RIO_DBELL_WIN_SIZE      0x1000

#define RIO_MSG_OMR_MUI         0x00000002
#define RIO_MSG_OSR_TE          0x00000080
#define RIO_MSG_OSR_QOI         0x00000020
#define RIO_MSG_OSR_QFI         0x00000010
#define RIO_MSG_OSR_MUB         0x00000004
#define RIO_MSG_OSR_EOMI        0x00000002
#define RIO_MSG_OSR_QEI         0x00000001

#define RIO_MSG_IMR_MI          0x00000002
#define RIO_MSG_ISR_TE          0x00000080
#define RIO_MSG_ISR_QFI         0x00000010
#define RIO_MSG_ISR_DIQI        0x00000001

#define RIO_IPWMR_SEN           0x00100000
#define RIO_IPWMR_QFIE          0x00000100
#define RIO_IPWMR_EIE           0x00000020
#define RIO_IPWMR_CQ            0x00000002
#define RIO_IPWMR_PWE           0x00000001

#define RIO_IPWSR_QF            0x00100000
#define RIO_IPWSR_TE            0x00000080
#define RIO_IPWSR_QFI           0x00000010
#define RIO_IPWSR_PWD           0x00000008
#define RIO_IPWSR_PWB           0x00000004

#define RIO_MSG_DESC_SIZE       32
#define RIO_MSG_BUFFER_SIZE     4096
#define RIO_MIN_TX_RING_SIZE    2
#define RIO_MAX_TX_RING_SIZE    2048
#define RIO_MIN_RX_RING_SIZE    2
#define RIO_MAX_RX_RING_SIZE    2048

#define DOORBELL_DMR_DI         0x00000002
#define DOORBELL_DSR_TE         0x00000080
#define DOORBELL_DSR_QFI        0x00000010
#define DOORBELL_DSR_DIQI       0x00000001
#define DOORBELL_TID_OFFSET     0x02
#define DOORBELL_SID_OFFSET     0x04
#define DOORBELL_INFO_OFFSET    0x06

#define DOORBELL_MESSAGE_SIZE   0x08
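/* Pull the source ID, target ID and info field out of an inbound doorbell message */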
#define DBELL_SID(x)            (*(u16 *)(x + DOORBELL_SID_OFFSET))
#define DBELL_TID(x)            (*(u16 *)(x + DOORBELL_TID_OFFSET))
#define DBELL_INF(x)            (*(u16 *)(x + DOORBELL_INFO_OFFSET))

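/* Outbound ATMU (address translation and mapping unit) window registers */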
struct rio_atmu_regs {
        u32 rowtar;
        u32 rowtear;
        u32 rowbar;
        u32 pad2;
        u32 rowar;
        u32 pad3[3];
};

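/*
 * Message unit register block: outbound and inbound message units,
 * outbound and inbound doorbell units, and the port-write unit, in
 * register-offset order (the pad/res fields preserve the layout).
 */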
struct rio_msg_regs {
        u32 omr;
        u32 osr;
        u32 pad1;
        u32 odqdpar;
        u32 pad2;
        u32 osar;
        u32 odpr;
        u32 odatr;
        u32 odcr;
        u32 pad3;
        u32 odqepar;
        u32 pad4[13];
        u32 imr;
        u32 isr;
        u32 pad5;
        u32 ifqdpar;
        u32 pad6;
        u32 ifqepar;
        u32 pad7[226];
        u32 odmr;
        u32 odsr;
        u32 res0[4];
        u32 oddpr;
        u32 oddatr;
        u32 res1[3];
        u32 odretcr;
        u32 res2[12];
        u32 dmr;
        u32 dsr;
        u32 pad8;
        u32 dqdpar;
        u32 pad9;
        u32 dqepar;
        u32 pad10[26];
        u32 pwmr;
        u32 pwsr;
        u32 epwqbar;
        u32 pwqbar;
};

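/* Outbound message descriptor, 32 bytes in memory (RIO_MSG_DESC_SIZE) */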
struct rio_tx_desc {
        u32 res1;
        u32 saddr;
        u32 dport;
        u32 dattr;
        u32 res2;
        u32 res3;
        u32 dwcnt;
        u32 res4;
};

struct rio_dbell_ring {
        void *virt;
        dma_addr_t phys;
};

struct rio_msg_tx_ring {
        void *virt;
        dma_addr_t phys;
        void *virt_buffer[RIO_MAX_TX_RING_SIZE];
        dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
        int tx_slot;
        int size;
        void *dev_id;
};

struct rio_msg_rx_ring {
        void *virt;
        dma_addr_t phys;
        void *virt_buffer[RIO_MAX_RX_RING_SIZE];
        int rx_slot;
        int size;
        void *dev_id;
};

struct rio_port_write_msg {
        void *virt;
        dma_addr_t phys;
        u32 msg_count;
        u32 err_count;
        u32 discard_count;
};

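/* Per-port private data, reached through rio_mport->priv */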
struct rio_priv {
        struct device *dev;
        void __iomem *regs_win;
        struct rio_atmu_regs __iomem *atmu_regs;
        struct rio_atmu_regs __iomem *maint_atmu_regs;
        struct rio_atmu_regs __iomem *dbell_atmu_regs;
        void __iomem *dbell_win;
        void __iomem *maint_win;
        struct rio_msg_regs __iomem *msg_regs;
        struct rio_dbell_ring dbell_ring;
        struct rio_msg_tx_ring msg_tx_ring;
        struct rio_msg_rx_ring msg_rx_ring;
        struct rio_port_write_msg port_write_msg;
        int bellirq;
        int txirq;
        int rxirq;
        int pwirq;
        struct work_struct pw_work;
        struct kfifo pw_fifo;
        spinlock_t pw_fifo_lock;
};

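/*
 * Guarded load from RapidIO config space.  The access carries an
 * exception-table entry, so a bus error on the maintenance window is
 * redirected to the fixup code (which returns -EFAULT in the 'err'
 * argument) by the machine check handler below instead of bringing
 * the kernel down.
 */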
#define __fsl_read_rio_config(x, addr, err, op)         \
        __asm__ __volatile__(                           \
                "1: "op" %1,0(%2)\n"                    \
                " eieio\n"                              \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3: li %1,-1\n"                         \
                " li %0,%3\n"                           \
                " b 2b\n"                               \
                ".section __ex_table,\"a\"\n"           \
                " .align 2\n"                           \
                " .long 1b,3b\n"                        \
                ".text"                                 \
                : "=r" (err), "=r" (x)                  \
                : "b" (addr), "i" (-EFAULT), "0" (err))

static void __iomem *rio_regs_win;

static int (*saved_mcheck_exception)(struct pt_regs *regs);

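/*
 * Machine check handler.  A machine check raised by a RapidIO bus error
 * during a guarded config-space read is acknowledged by clearing
 * LTLEDCSR and resolved by jumping to the exception-table fixup;
 * anything else is passed on to the saved/original handler.
 */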
static int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *entry = NULL;
        unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);

        if (reason & MCSR_BUS_RBERR) {
                reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
                if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
                        /* Check if we are prepared to handle this fault */
                        entry = search_exception_tables(regs->nip);
                        if (entry) {
                                pr_debug("RIO: %s - MC Exception handled\n",
                                         __func__);
                                out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
                                         0);
                                regs->msr |= MSR_RI;
                                regs->nip = entry->fixup;
                                return 1;
                        }
                }
        }

        if (saved_mcheck_exception)
                return saved_mcheck_exception(regs);
        else
                return cur_cpu_spec->machine_check(regs);
}

/**
 * fsl_rio_doorbell_send - Send an MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends an MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
static int fsl_rio_doorbell_send(struct rio_mport *mport,
                                int index, u16 destid, u16 data)
{
        struct rio_priv *priv = mport->priv;
        pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
                 index, destid, data);
        switch (mport->phy_type) {
        case RIO_PHY_PARALLEL:
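                /*
                 * Parallel PHY: retarget the doorbell ATMU window at the
                 * destination and fire the doorbell by writing the 16-bit
                 * info field through the window.
                 */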
                out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
                out_be16(priv->dbell_win, data);
                break;
        case RIO_PHY_SERIAL:
                /* In serial RapidIO silicon, such as the MPC8548 and
                 * MPC8641, the sequence below is required.
                 */
                out_be32(&priv->msg_regs->odmr, 0x00000000);
                out_be32(&priv->msg_regs->odretcr, 0x00000004);
                out_be32(&priv->msg_regs->oddpr, destid << 16);
                out_be32(&priv->msg_regs->oddatr, data);
                out_be32(&priv->msg_regs->odmr, 0x00000001);
                break;
        }

        return 0;
}

/**
 * fsl_local_config_read - Generate an MPC85xx local config space read
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be read into
 *
 * Generates an MPC85xx local configuration space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_read(struct rio_mport *mport,
                                int index, u32 offset, int len, u32 *data)
{
        struct rio_priv *priv = mport->priv;
        pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
                 offset);
        *data = in_be32(priv->regs_win + offset);

        return 0;
}

/**
 * fsl_local_config_write - Generate an MPC85xx local config space write
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates an MPC85xx local configuration space write. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_write(struct rio_mport *mport,
                                int index, u32 offset, int len, u32 data)
{
        struct rio_priv *priv = mport->priv;
        pr_debug
            ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
             index, offset, data);
        out_be32(priv->regs_win + offset, data);

        return 0;
}

/**
 * fsl_rio_config_read - Generate an MPC85xx read maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Location to be read into
 *
 * Generates an MPC85xx read maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
                        u8 hopcount, u32 offset, int len, u32 *val)
{
        struct rio_priv *priv = mport->priv;
        u8 *data;
        u32 rval, err = 0;

        pr_debug
            ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
             index, destid, hopcount, offset, len);

        /* Only a 16MB maintenance window is available, and only
         * naturally aligned accesses to the maintenance registers
         * are allowed.
         */
        if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
                return -EINVAL;

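        /*
         * Retarget the maintenance ATMU window: destination ID, hop count
         * and the upper part of the offset go into ROWTAR, and the high
         * bits of a large destination ID spill into ROWTEAR.
         */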
        out_be32(&priv->maint_atmu_regs->rowtar,
                 (destid << 22) | (hopcount << 12) | (offset >> 12));
        out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));

        data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
        switch (len) {
        case 1:
                __fsl_read_rio_config(rval, data, err, "lbz");
                break;
        case 2:
                __fsl_read_rio_config(rval, data, err, "lhz");
                break;
        case 4:
                __fsl_read_rio_config(rval, data, err, "lwz");
                break;
        default:
                return -EINVAL;
        }

        if (err) {
                pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
                         err, destid, hopcount, offset);
        }

        *val = rval;

        return err;
}

/**
 * fsl_rio_config_write - Generate an MPC85xx write maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Value to be written
 *
 * Generates an MPC85xx write maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
                        u8 hopcount, u32 offset, int len, u32 val)
{
        struct rio_priv *priv = mport->priv;
        u8 *data;
        pr_debug
            ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
             index, destid, hopcount, offset, len, val);

        /* Only a 16MB maintenance window is available, and only
         * naturally aligned accesses to the maintenance registers
         * are allowed.
         */
        if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
                return -EINVAL;

        out_be32(&priv->maint_atmu_regs->rowtar,
                 (destid << 22) | (hopcount << 12) | (offset >> 12));
        out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));

        data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
        switch (len) {
        case 1:
                out_8((u8 *) data, val);
                break;
        case 2:
                out_be16((u16 *) data, val);
                break;
        case 4:
                out_be32((u32 *) data, val);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/**
 * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
                        void *buffer, size_t len)
{
        struct rio_priv *priv = mport->priv;
        u32 omr;
        struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
                                        + priv->msg_tx_ring.tx_slot;
        int ret = 0;

        pr_debug
            ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
             rdev->destid, mbox, (int)buffer, len);

        if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
                ret = -EINVAL;
                goto out;
        }

        /* Copy and clear rest of buffer */
        memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
               len);
        if (len < (RIO_MAX_MSG_SIZE - 4))
                memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
                       + len, 0, RIO_MAX_MSG_SIZE - len);

        switch (mport->phy_type) {
        case RIO_PHY_PARALLEL:
                /* Set mbox field for message */
                desc->dport = mbox & 0x3;

                /* Enable EOMI interrupt, set priority, and set destid */
                desc->dattr = 0x28000000 | (rdev->destid << 2);
                break;
        case RIO_PHY_SERIAL:
                /* Set mbox field for message, and set destid */
                desc->dport = (rdev->destid << 16) | (mbox & 0x3);

                /* Enable EOMI interrupt and priority */
                desc->dattr = 0x28000000;
                break;
        }

        /* Set transfer size aligned to next power of 2 (in double words) */
        desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

        /* Set snooping and source buffer address */
        desc->saddr = 0x00000004
                | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];

        /* Increment enqueue pointer */
        omr = in_be32(&priv->msg_regs->omr);
        out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

        /* Go to next descriptor */
        if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
                priv->msg_tx_ring.tx_slot = 0;

out:
        return ret;
}

EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
        int osr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
        struct rio_priv *priv = port->priv;

        osr = in_be32(&priv->msg_regs->osr);

        if (osr & RIO_MSG_OSR_TE) {
                pr_info("RIO: outbound message transmission error\n");
                out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
                goto out;
        }

        if (osr & RIO_MSG_OSR_QOI) {
                pr_info("RIO: outbound message queue overflow\n");
                out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
                goto out;
        }

        if (osr & RIO_MSG_OSR_EOMI) {
                u32 dqp = in_be32(&priv->msg_regs->odqdpar);
                int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
                port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
                                         slot);

                /* Ack the end-of-message interrupt */
                out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
        }

out:
        return IRQ_HANDLED;
}

/**
 * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
        int i, j, rc = 0;
        struct rio_priv *priv = mport->priv;

        if ((entries < RIO_MIN_TX_RING_SIZE) ||
            (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
                rc = -EINVAL;
                goto out;
        }

        /* Initialize shadow copy ring */
        priv->msg_tx_ring.dev_id = dev_id;
        priv->msg_tx_ring.size = entries;

        for (i = 0; i < priv->msg_tx_ring.size; i++) {
                priv->msg_tx_ring.virt_buffer[i] =
                        dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
                                &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
                if (!priv->msg_tx_ring.virt_buffer[i]) {
                        rc = -ENOMEM;
                        for (j = 0; j < priv->msg_tx_ring.size; j++)
                                if (priv->msg_tx_ring.virt_buffer[j])
                                        dma_free_coherent(priv->dev,
                                                RIO_MSG_BUFFER_SIZE,
                                                priv->msg_tx_ring.virt_buffer[j],
                                                priv->msg_tx_ring.phys_buffer[j]);
                        goto out;
                }
        }

        /* Initialize outbound message descriptor ring */
        priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
                                priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                                &priv->msg_tx_ring.phys, GFP_KERNEL);
        if (!priv->msg_tx_ring.virt) {
                rc = -ENOMEM;
                goto out_dma;
        }
        memset(priv->msg_tx_ring.virt, 0,
               priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
        priv->msg_tx_ring.tx_slot = 0;

        /* Point dequeue/enqueue pointers at first entry in ring */
        out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
        out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);

        /* Configure for snooping */
        out_be32(&priv->msg_regs->osar, 0x00000004);

        /* Clear interrupt status */
        out_be32(&priv->msg_regs->osr, 0x000000b3);

        /* Hook up outbound message handler */
        rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
                         "msg_tx", (void *)mport);
        if (rc < 0)
                goto out_irq;

        /*
         * Configure outbound message unit
         *      Snooping
         *      Interrupts (all enabled, except QEIE)
         *      Chaining mode
         *      Disable
         */
        out_be32(&priv->msg_regs->omr, 0x00100220);

        /* Set number of entries */
        out_be32(&priv->msg_regs->omr,
                 in_be32(&priv->msg_regs->omr) |
                 ((get_bitmask_order(entries) - 2) << 12));

        /* Now enable the unit */
        out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);

out:
        return rc;

out_irq:
        dma_free_coherent(priv->dev,
                          priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                          priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

out_dma:
        for (i = 0; i < priv->msg_tx_ring.size; i++)
                dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
                                  priv->msg_tx_ring.virt_buffer[i],
                                  priv->msg_tx_ring.phys_buffer[i]);

        return rc;
}

/**
 * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
{
        struct rio_priv *priv = mport->priv;
        /* Disable outbound message unit */
        out_be32(&priv->msg_regs->omr, 0);

        /* Free ring */
        dma_free_coherent(priv->dev,
                          priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                          priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

        /* Free interrupt */
        free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

| 710 | /** |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 711 | * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 712 | * @irq: Linux interrupt number |
| 713 | * @dev_instance: Pointer to interrupt-specific data |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 714 | * |
| 715 | * Handles inbound message interrupts. Executes a registered inbound |
Simon Arlott | a8de5ce | 2007-05-12 05:42:54 +1000 | [diff] [blame] | 716 | * mailbox event handler and acks the interrupt occurrence. |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 717 | */ |
| 718 | static irqreturn_t |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 719 | fsl_rio_rx_handler(int irq, void *dev_instance) |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 720 | { |
| 721 | int isr; |
| 722 | struct rio_mport *port = (struct rio_mport *)dev_instance; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 723 | struct rio_priv *priv = port->priv; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 724 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 725 | isr = in_be32(&priv->msg_regs->isr); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 726 | |
| 727 | if (isr & RIO_MSG_ISR_TE) { |
| 728 | pr_info("RIO: inbound message reception error\n"); |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 729 | out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 730 | goto out; |
| 731 | } |
| 732 | |
| 733 | /* XXX Need to check/dispatch until queue empty */ |
| 734 | if (isr & RIO_MSG_ISR_DIQI) { |
| 735 | /* |
| 736 | * We implement *only* mailbox 0, but can receive messages |
| 737 | * for any mailbox/letter to that mailbox destination. So, |
| 738 | * make the callback with an unknown/invalid mailbox number |
| 739 | * argument. |
| 740 | */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 741 | port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 742 | |
| 743 | /* Ack the queueing interrupt */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 744 | out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 745 | } |
| 746 | |
| 747 | out: |
| 748 | return IRQ_HANDLED; |
| 749 | } |
| 750 | |
/**
 * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
        int i, rc = 0;
        struct rio_priv *priv = mport->priv;

        if ((entries < RIO_MIN_RX_RING_SIZE) ||
            (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
                rc = -EINVAL;
                goto out;
        }

        /* Initialize client buffer ring */
        priv->msg_rx_ring.dev_id = dev_id;
        priv->msg_rx_ring.size = entries;
        priv->msg_rx_ring.rx_slot = 0;
        for (i = 0; i < priv->msg_rx_ring.size; i++)
                priv->msg_rx_ring.virt_buffer[i] = NULL;

        /* Initialize inbound message ring */
        priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
                                priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                                &priv->msg_rx_ring.phys, GFP_KERNEL);
        if (!priv->msg_rx_ring.virt) {
                rc = -ENOMEM;
                goto out;
        }

        /* Point dequeue/enqueue pointers at first entry in ring */
        out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
        out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);

        /* Clear interrupt status */
        out_be32(&priv->msg_regs->isr, 0x00000091);

        /* Hook up inbound message handler */
        rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
                         "msg_rx", (void *)mport);
        if (rc < 0) {
                /* Undo the inbound ring allocation made above */
                dma_free_coherent(priv->dev,
                                  priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                                  priv->msg_rx_ring.virt,
                                  priv->msg_rx_ring.phys);
                goto out;
        }

        /*
         * Configure inbound message unit:
         *      Snooping
         *      4KB max message size
         *      Unmask all interrupt sources
         *      Disable
         */
        out_be32(&priv->msg_regs->imr, 0x001b0060);

        /* Set number of queue entries */
        setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

        /* Now enable the unit */
        setbits32(&priv->msg_regs->imr, 0x1);

out:
        return rc;
}

/**
 * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
{
        struct rio_priv *priv = mport->priv;
        /* Disable inbound message unit */
        out_be32(&priv->msg_regs->imr, 0);

        /* Free ring */
        dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                          priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);

        /* Free interrupt */
        free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
        int rc = 0;
        struct rio_priv *priv = mport->priv;

        pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
                 priv->msg_rx_ring.rx_slot);

        if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
                printk(KERN_ERR
                       "RIO: error adding inbound buffer %d, buffer exists\n",
                       priv->msg_rx_ring.rx_slot);
                rc = -EINVAL;
                goto out;
        }

        priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
        if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
                priv->msg_rx_ring.rx_slot = 0;

out:
        return rc;
}

EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);

/**
 * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
{
        struct rio_priv *priv = mport->priv;
        u32 phys_buf, virt_buf;
        void *buf = NULL;
        int buf_idx;

        phys_buf = in_be32(&priv->msg_regs->ifqdpar);

        /* If no more messages, then bail out */
        if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
                goto out2;

        virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
                                                   - priv->msg_rx_ring.phys);
        buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
        buf = priv->msg_rx_ring.virt_buffer[buf_idx];

        if (!buf) {
                printk(KERN_ERR
                       "RIO: inbound message copy failed, no buffers\n");
                goto out1;
        }

        /* Copy max message size, caller is expected to allocate that big */
        memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);

        /* Clear the available buffer */
        priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
        setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
        return buf;
}

EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
        int dsr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
        struct rio_priv *priv = port->priv;

        dsr = in_be32(&priv->msg_regs->dsr);

        if (dsr & DOORBELL_DSR_TE) {
                pr_info("RIO: doorbell reception error\n");
                out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
                goto out;
        }

        if (dsr & DOORBELL_DSR_QFI) {
                pr_info("RIO: doorbell queue full\n");
                out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
                goto out;
        }

        /* XXX Need to check/dispatch until queue empty */
        if (dsr & DOORBELL_DSR_DIQI) {
                u32 dmsg =
                    (u32) priv->dbell_ring.virt +
                    (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
                struct rio_dbell *dbell;
                int found = 0;

                pr_debug
                    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
                     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));

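                /*
                 * Look for the registered handler whose doorbell info range
                 * covers the info field of this message.
                 */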
                list_for_each_entry(dbell, &port->dbells, node) {
                        if ((dbell->res->start <= DBELL_INF(dmsg)) &&
                            (dbell->res->end >= DBELL_INF(dmsg))) {
                                found = 1;
                                break;
                        }
                }
                if (found) {
                        dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg),
                                    DBELL_TID(dmsg), DBELL_INF(dmsg));
                } else {
                        pr_debug
                            ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
                             DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
                }
                setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
                out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
        }

out:
        return IRQ_HANDLED;
}

| 993 | /** |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 994 | * fsl_rio_doorbell_init - MPC85xx doorbell interface init |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 995 | * @mport: Master port implementing the inbound doorbell unit |
| 996 | * |
| 997 | * Initializes doorbell unit hardware and inbound DMA buffer |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 998 | * ring. Called from fsl_rio_setup(). Returns %0 on success |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 999 | * or %-ENOMEM on failure. |
| 1000 | */ |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1001 | static int fsl_rio_doorbell_init(struct rio_mport *mport) |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1002 | { |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1003 | struct rio_priv *priv = mport->priv; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1004 | int rc = 0; |
| 1005 | |
| 1006 | /* Map outbound doorbell window immediately after maintenance window */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1007 | priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, |
| 1008 | RIO_DBELL_WIN_SIZE); |
| 1009 | if (!priv->dbell_win) { |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1010 | printk(KERN_ERR |
| 1011 | "RIO: unable to map outbound doorbell window\n"); |
| 1012 | rc = -ENOMEM; |
| 1013 | goto out; |
| 1014 | } |
| 1015 | |
| 1016 | /* Initialize inbound doorbells */ |
Anton Vorontsov | 0dbbbf1 | 2009-04-18 21:48:52 +0400 | [diff] [blame] | 1017 | priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1018 | DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); |
| 1019 | if (!priv->dbell_ring.virt) { |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1020 | printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n"); |
| 1021 | rc = -ENOMEM; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1022 | iounmap(priv->dbell_win); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1023 | goto out; |
| 1024 | } |
| 1025 | |
| 1026 | /* Point dequeue/enqueue pointers at first entry in ring */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1027 | out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); |
| 1028 | out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1029 | |
| 1030 | /* Clear interrupt status */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1031 | out_be32(&priv->msg_regs->dsr, 0x00000091); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1032 | |
| 1033 | /* Hook up doorbell handler */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1034 | rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, |
| 1035 | "dbell_rx", (void *)mport); |
| 1036 | if (rc < 0) { |
| 1037 | iounmap(priv->dbell_win); |
Anton Vorontsov | 0dbbbf1 | 2009-04-18 21:48:52 +0400 | [diff] [blame] | 1038 | dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1039 | priv->dbell_ring.virt, priv->dbell_ring.phys); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1040 | printk(KERN_ERR |
| 1041 | "MPC85xx RIO: unable to request inbound doorbell irq\n"); |
| 1042 | goto out; |
| 1043 | } |
| 1044 | |
| 1045 | /* Configure doorbells for snooping, 512 entries, and enable */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1046 | out_be32(&priv->msg_regs->dmr, 0x00108161); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1047 | |
| 1048 | out: |
| 1049 | return rc; |
| 1050 | } |
| 1051 | |
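/*
 * Illustrative sketch: with the outbound doorbell window mapped above, a
 * device driver sends doorbells through the generic RIO API; the request
 * reaches this mport via the ->dsend hook (fsl_rio_doorbell_send, wired up
 * in fsl_rio_setup() below).  'rdev' stands for any bound struct rio_dev
 * and the info value 0x10 is arbitrary.
 */
#if 0	/* example only, not compiled */
static int example_ring_doorbell(struct rio_dev *rdev)
{
	return rio_send_doorbell(rdev, 0x10);
}
#endif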
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame] | 1052 | /** |
| 1053 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler |
| 1054 | * @irq: Linux interrupt number |
| 1055 | * @dev_instance: Pointer to interrupt-specific data |
| 1056 | * |
| 1057 | * Handles port write interrupts. Parses a list of registered |
| 1058 | * port write event handlers and executes a matching event handler. |
| 1059 | */ |
| 1060 | static irqreturn_t |
| 1061 | fsl_rio_port_write_handler(int irq, void *dev_instance) |
| 1062 | { |
| 1063 | u32 ipwmr, ipwsr; |
| 1064 | struct rio_mport *port = (struct rio_mport *)dev_instance; |
| 1065 | struct rio_priv *priv = port->priv; |
| 1066 | u32 epwisr, tmp; |
| 1067 | |
| 1068 | ipwmr = in_be32(&priv->msg_regs->pwmr); |
| 1069 | ipwsr = in_be32(&priv->msg_regs->pwsr); |
| 1070 | |
| 1071 | epwisr = in_be32(priv->regs_win + RIO_EPWISR); |
| 1072 | if (epwisr & 0x80000000) { |
| 1073 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); |
| 1074 | pr_info("RIO_LTLEDCSR = 0x%x\n", tmp); |
| 1075 | out_be32(priv->regs_win + RIO_LTLEDCSR, 0); |
| 1076 | } |
| 1077 | |
| 1078 | if (!(epwisr & 0x00000001)) |
| 1079 | return IRQ_HANDLED; |
| 1080 | |
| 1081 | #ifdef DEBUG_PW |
| 1082 | pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); |
| 1083 | if (ipwsr & RIO_IPWSR_QF) |
| 1084 | pr_debug(" QF"); |
| 1085 | if (ipwsr & RIO_IPWSR_TE) |
| 1086 | pr_debug(" TE"); |
| 1087 | if (ipwsr & RIO_IPWSR_QFI) |
| 1088 | pr_debug(" QFI"); |
| 1089 | if (ipwsr & RIO_IPWSR_PWD) |
| 1090 | pr_debug(" PWD"); |
| 1091 | if (ipwsr & RIO_IPWSR_PWB) |
| 1092 | pr_debug(" PWB"); |
| 1093 | pr_debug(" )\n"); |
| 1094 | #endif |
| 1095 | out_be32(&priv->msg_regs->pwsr, |
| 1096 | ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); |
| 1097 | |
| 1098 | if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { |
| 1099 | priv->port_write_msg.err_count++; |
| 1100 | pr_info("RIO: Port-Write Transaction Err (%d)\n", |
| 1101 | priv->port_write_msg.err_count); |
| 1102 | } |
| 1103 | if (ipwsr & RIO_IPWSR_PWD) { |
| 1104 | priv->port_write_msg.discard_count++; |
| 1105 | pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n", |
| 1106 | priv->port_write_msg.discard_count); |
| 1107 | } |
| 1108 | |
| 1109 | /* Schedule deferred processing if PW was received */ |
| 1110 | if (ipwsr & RIO_IPWSR_QFI) { |
| 1111 | /* Save PW message (if there is room in FIFO), |
| 1112 | * otherwise discard it. |
| 1113 | */ |
| 1114 | if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { |
| 1115 | priv->port_write_msg.msg_count++; |
| 1116 | kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, |
| 1117 | RIO_PW_MSG_SIZE); |
| 1118 | } else { |
| 1119 | priv->port_write_msg.discard_count++; |
| 1120 | pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", |
| 1121 | priv->port_write_msg.discard_count); |
| 1122 | } |
| 1123 | schedule_work(&priv->pw_work); |
| 1124 | } |
| 1125 | |
| 1126 | /* Issue Clear Queue command. This allows another |
| 1127 | * port-write to be received. |
| 1128 | */ |
| 1129 | out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); |
| 1130 | |
| 1131 | return IRQ_HANDLED; |
| 1132 | } |
| 1133 | |
| 1134 | static void fsl_pw_dpc(struct work_struct *work) |
| 1135 | { |
| 1136 | struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); |
| 1137 | unsigned long flags; |
| 1138 | u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; |
| 1139 | |
| 1140 | /* |
| 1141 | * Process port-write messages |
| 1142 | */ |
| 1143 | spin_lock_irqsave(&priv->pw_fifo_lock, flags); |
| 1144 | while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, |
| 1145 | RIO_PW_MSG_SIZE)) { |
| 1146 | /* Process one message */ |
| 1147 | spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); |
| 1148 | #ifdef DEBUG_PW |
| 1149 | { |
| 1150 | u32 i; |
| 1151 | pr_debug("%s : Port-Write Message:", __func__); |
| 1152 | for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { |
| 1153 | if ((i%4) == 0) |
| 1154 | pr_debug("\n0x%02x: 0x%08x", i*4, |
| 1155 | msg_buffer[i]); |
| 1156 | else |
| 1157 | pr_debug(" 0x%08x", msg_buffer[i]); |
| 1158 | } |
| 1159 | pr_debug("\n"); |
| 1160 | } |
| 1161 | #endif |
| 1162 | /* Pass the port-write message to RIO core for processing */ |
| 1163 | rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); |
| 1164 | spin_lock_irqsave(&priv->pw_fifo_lock, flags); |
| 1165 | } |
| 1166 | spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); |
| 1167 | } |
| 1168 | |
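/*
 * Illustrative sketch: fsl_pw_dpc() above hands each queued port-write to
 * the RIO core via rio_inb_pwrite_handler(); the core then invokes a
 * per-device callback registered with rio_request_inb_pwrite().  The
 * callback below is hypothetical and only logs the reporting component tag.
 */
#if 0	/* example only, not compiled */
static int example_pw_cb(struct rio_dev *rdev, union rio_pw_msg *msg, int step)
{
	pr_info("RIO: port-write from %s, comptag 0x%08x\n",
		dev_name(&rdev->dev), msg->em.comptag);
	return 0;
}

static int example_register_pw(struct rio_dev *rdev)
{
	return rio_request_inb_pwrite(rdev, example_pw_cb);
}
#endif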
| 1169 | /** |
| 1170 | * fsl_rio_pw_enable - enable/disable port-write interface |
| 1171 | * @mport: Master port implementing the port write unit |
| 1172 | * @enable: 1=enable; 0=disable port-write message handling |
| 1173 | */ |
| 1174 | static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) |
| 1175 | { |
| 1176 | struct rio_priv *priv = mport->priv; |
| 1177 | u32 rval; |
| 1178 | |
| 1179 | rval = in_be32(&priv->msg_regs->pwmr); |
| 1180 | |
| 1181 | if (enable) |
| 1182 | rval |= RIO_IPWMR_PWE; |
| 1183 | else |
| 1184 | rval &= ~RIO_IPWMR_PWE; |
| 1185 | |
| 1186 | out_be32(&priv->msg_regs->pwmr, rval); |
| 1187 | |
| 1188 | return 0; |
| 1189 | } |
| 1190 | |
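/*
 * Illustrative note: fsl_rio_pw_enable() is not called directly by device
 * drivers; it is reached through the mport operations table assigned in
 * fsl_rio_setup() below (ops->pwenable).  A caller would use it roughly as
 * in this hypothetical helper.
 */
#if 0	/* example only, not compiled */
static void example_enable_pw(struct rio_mport *port)
{
	if (port->ops->pwenable)
		port->ops->pwenable(port, 1);	/* start accepting port-writes */
}
#endif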
| 1191 | /** |
| 1192 | * fsl_rio_port_write_init - MPC85xx port write interface init |
| 1193 | * @mport: Master port implementing the port write unit |
| 1194 | * |
| 1195 | * Initializes port write unit hardware and DMA buffer |
| 1196 | * ring. Called from fsl_rio_setup(). Returns %0 on success |
| 1197 | * or %-ENOMEM on failure. |
| 1198 | */ |
| 1199 | static int fsl_rio_port_write_init(struct rio_mport *mport) |
| 1200 | { |
| 1201 | struct rio_priv *priv = mport->priv; |
| 1202 | int rc = 0; |
| 1203 | |
| 1204 | /* The following configuration requires the port-write controller to be disabled */ |
| 1205 | out_be32(&priv->msg_regs->pwmr, |
| 1206 | in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); |
| 1207 | |
| 1208 | /* Initialize port write */ |
| 1209 | priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, |
| 1210 | RIO_PW_MSG_SIZE, |
| 1211 | &priv->port_write_msg.phys, GFP_KERNEL); |
| 1212 | if (!priv->port_write_msg.virt) { |
| 1213 | pr_err("RIO: unable to allocate port-write queue\n"); |
| 1214 | return -ENOMEM; |
| 1215 | } |
| 1216 | |
| 1217 | priv->port_write_msg.err_count = 0; |
| 1218 | priv->port_write_msg.discard_count = 0; |
| 1219 | |
| 1220 | /* Point dequeue/enqueue pointers at first entry */ |
| 1221 | out_be32(&priv->msg_regs->epwqbar, 0); |
| 1222 | out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); |
| 1223 | |
| 1224 | pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", |
| 1225 | in_be32(&priv->msg_regs->epwqbar), |
| 1226 | in_be32(&priv->msg_regs->pwqbar)); |
| 1227 | |
| 1228 | /* Clear interrupt status IPWSR */ |
| 1229 | out_be32(&priv->msg_regs->pwsr, |
| 1230 | (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); |
| 1231 | |
| 1232 | /* Configure the port-write controller: enable snooping and all |
| 1233 | error reporting, and clear the queue-full condition */ |
| 1234 | out_be32(&priv->msg_regs->pwmr, |
| 1235 | RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); |
| 1236 | |
| 1237 | |
| 1238 | /* Hook up port-write handler */ |
| 1239 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, |
| 1240 | "port-write", (void *)mport); |
| 1241 | if (rc < 0) { |
| 1242 | pr_err("MPC85xx RIO: unable to request port-write irq\n"); |
| 1243 | goto err_out; |
| 1244 | } |
| 1245 | |
| 1246 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); |
| 1247 | spin_lock_init(&priv->pw_fifo_lock); |
| 1248 | if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { |
| 1249 | pr_err("FIFO allocation failed\n"); |
| 1250 | rc = -ENOMEM; |
| 1251 | goto err_out_irq; |
| 1252 | } |
| 1253 | |
| 1254 | pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", |
| 1255 | in_be32(&priv->msg_regs->pwmr), |
| 1256 | in_be32(&priv->msg_regs->pwsr)); |
| 1257 | |
| 1258 | return rc; |
| 1259 | |
| 1260 | err_out_irq: |
| 1261 | free_irq(IRQ_RIO_PW(mport), (void *)mport); |
| 1262 | err_out: |
| 1263 | dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, |
| 1264 | priv->port_write_msg.virt, |
| 1265 | priv->port_write_msg.phys); |
| 1266 | return rc; |
| 1267 | } |
| 1268 | |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1269 | static char *cmdline = NULL; |
| 1270 | |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1271 | static int fsl_rio_get_hdid(int index) |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1272 | { |
| 1273 | /* XXX Need to parse multiple entries in some format */ |
| 1274 | if (!cmdline) |
| 1275 | return -1; |
| 1276 | |
| 1277 | return simple_strtol(cmdline, NULL, 0); |
| 1278 | } |
| 1279 | |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1280 | static int fsl_rio_get_cmdline(char *s) |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1281 | { |
| 1282 | if (!s) |
| 1283 | return 0; |
| 1284 | |
| 1285 | cmdline = s; |
| 1286 | return 1; |
| 1287 | } |
| 1288 | |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1289 | __setup("riohdid=", fsl_rio_get_cmdline); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1290 | |
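/*
 * Usage note (sketch): the host device ID comes from the kernel command
 * line, e.g. booting with "riohdid=4" makes this master port claim host
 * device ID 4.  Without the parameter fsl_rio_get_hdid() returns -1 and the
 * enumeration code treats the port as a non-host (discovery) endpoint.  As
 * the XXX comment above notes, only a single ID is parsed today.
 */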
Zhang Wei | 7f620df | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1291 | static inline void fsl_rio_info(struct device *dev, u32 ccsr) |
| 1292 | { |
| 1293 | const char *str; |
| 1294 | if (ccsr & 1) { |
| 1295 | /* Serial phy */ |
| 1296 | switch (ccsr >> 30) { |
| 1297 | case 0: |
| 1298 | str = "1"; |
| 1299 | break; |
| 1300 | case 1: |
| 1301 | str = "4"; |
| 1302 | break; |
| 1303 | default: |
| 1304 | str = "Unknown"; |
Joe Perches | d258e64 | 2009-06-28 06:26:10 +0000 | [diff] [blame] | 1305 | break; |
Zhang Wei | 7f620df | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1306 | } |
| 1307 | dev_info(dev, "Hardware port width: %s\n", str); |
| 1308 | |
| 1309 | switch ((ccsr >> 27) & 7) { |
| 1310 | case 0: |
| 1311 | str = "Single-lane 0"; |
| 1312 | break; |
| 1313 | case 1: |
| 1314 | str = "Single-lane 2"; |
| 1315 | break; |
| 1316 | case 2: |
| 1317 | str = "Four-lane"; |
| 1318 | break; |
| 1319 | default: |
| 1320 | str = "Unknown"; |
| 1321 | break; |
| 1322 | } |
| 1323 | dev_info(dev, "Training connection status: %s\n", str); |
| 1324 | } else { |
| 1325 | /* Parallel phy */ |
| 1326 | if (!(ccsr & 0x80000000)) |
| 1327 | dev_info(dev, "Output port operating in 8-bit mode\n"); |
| 1328 | if (!(ccsr & 0x08000000)) |
| 1329 | dev_info(dev, "Input port operating in 8-bit mode\n"); |
| 1330 | } |
| 1331 | } |
| 1332 | |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1333 | /** |
Randy Dunlap | 9941d94 | 2008-04-30 16:45:58 -0700 | [diff] [blame] | 1334 | * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface |
| 1335 | * @dev: of_device pointer |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1336 | * |
| 1337 | * Initializes MPC85xx RapidIO hardware interface, configures |
| 1338 | * master port with system-specific info, and registers the |
| 1339 | * master port with the RapidIO subsystem. |
| 1340 | */ |
Grant Likely | a454dc5 | 2010-07-22 15:52:34 -0600 | [diff] [blame] | 1341 | int fsl_rio_setup(struct platform_device *dev) |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1342 | { |
| 1343 | struct rio_ops *ops; |
| 1344 | struct rio_mport *port; |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1345 | struct rio_priv *priv; |
| 1346 | int rc = 0; |
| 1347 | const u32 *dt_range, *cell; |
| 1348 | struct resource regs; |
| 1349 | int rlen; |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1350 | u32 ccsr; |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1351 | u64 law_start, law_size; |
| 1352 | int paw, aw, sw; |
| 1353 | |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1354 | if (!dev->dev.of_node) { |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1355 | dev_err(&dev->dev, "Device OF-Node is NULL"); |
| 1356 | return -EFAULT; |
| 1357 | } |
| 1358 | |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1359 | rc = of_address_to_resource(dev->dev.of_node, 0, ®s); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1360 | if (rc) { |
| 1361 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1362 | dev->dev.of_node->full_name); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1363 | return -EFAULT; |
| 1364 | } |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1365 | dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); |
Kumar Gala | fc274a1 | 2009-05-13 17:02:24 -0500 | [diff] [blame] | 1366 | dev_info(&dev->dev, "Regs: %pR\n", ®s); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1367 | |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1368 | dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1369 | if (!dt_range) { |
| 1370 | dev_err(&dev->dev, "Can't get %s property 'ranges'\n", |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1371 | dev->dev.of_node->full_name); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1372 | return -EFAULT; |
| 1373 | } |
| 1374 | |
| 1375 | /* Get the node address (#address-cells) width */ |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1376 | cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1377 | if (cell) |
| 1378 | aw = *cell; |
| 1379 | else |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1380 | aw = of_n_addr_cells(dev->dev.of_node); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1381 | /* Get the node size (#size-cells) width */ |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1382 | cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1383 | if (cell) |
| 1384 | sw = *cell; |
| 1385 | else |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1386 | sw = of_n_size_cells(dev->dev.of_node); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1387 | /* Get the parent address cell width */ |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1388 | paw = of_n_addr_cells(dev->dev.of_node); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1389 | |
| 1390 | law_start = of_read_number(dt_range + aw, paw); |
| 1391 | law_size = of_read_number(dt_range + aw + paw, sw); |
| 1392 | |
| 1393 | dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", |
| 1394 | law_start, law_size); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1395 | |
Alexandre Bounine | e5cabeb | 2010-05-26 14:43:59 -0700 | [diff] [blame] | 1396 | ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1397 | if (!ops) { |
| 1398 | rc = -ENOMEM; |
| 1399 | goto err_ops; |
| 1400 | } |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1401 | ops->lcread = fsl_local_config_read; |
| 1402 | ops->lcwrite = fsl_local_config_write; |
| 1403 | ops->cread = fsl_rio_config_read; |
| 1404 | ops->cwrite = fsl_rio_config_write; |
| 1405 | ops->dsend = fsl_rio_doorbell_send; |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame] | 1406 | ops->pwenable = fsl_rio_pw_enable; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1407 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1408 | port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1409 | if (!port) { |
| 1410 | rc = -ENOMEM; |
| 1411 | goto err_port; |
| 1412 | } |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1413 | port->id = 0; |
| 1414 | port->index = 0; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1415 | |
| 1416 | priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); |
| 1417 | if (!priv) { |
| 1418 | printk(KERN_ERR "Can't alloc memory for 'priv'\n"); |
| 1419 | rc = -ENOMEM; |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1420 | goto err_priv; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1421 | } |
| 1422 | |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1423 | INIT_LIST_HEAD(&port->dbells); |
| 1424 | port->iores.start = law_start; |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1425 | port->iores.end = law_start + law_size - 1; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1426 | port->iores.flags = IORESOURCE_MEM; |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1427 | port->iores.name = "rio_io_win"; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1428 | |
Alexandre Bounine | 45fdf00 | 2010-05-28 13:56:17 -0400 | [diff] [blame] | 1429 | priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1430 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); |
| 1431 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); |
| 1432 | priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame] | 1433 | dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", |
| 1434 | priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1435 | |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1436 | rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); |
| 1437 | rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); |
| 1438 | rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); |
| 1439 | strcpy(port->name, "RIO0 mport"); |
| 1440 | |
Anton Vorontsov | 0dbbbf1 | 2009-04-18 21:48:52 +0400 | [diff] [blame] | 1441 | priv->dev = &dev->dev; |
| 1442 | |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1443 | port->ops = ops; |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1444 | port->host_deviceid = fsl_rio_get_hdid(port->id); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1445 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1446 | port->priv = priv; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1447 | rio_register_mport(port); |
| 1448 | |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1449 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); |
Alexandre Bounine | a52c8f5 | 2010-05-26 14:44:00 -0700 | [diff] [blame] | 1450 | rio_regs_win = priv->regs_win; |
Zhang Wei | e042323 | 2008-04-18 13:33:42 -0700 | [diff] [blame] | 1451 | |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1452 | /* Probe the master port phy type */ |
| 1453 | ccsr = in_be32(priv->regs_win + RIO_CCSR); |
| 1454 | port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; |
| 1455 | dev_info(&dev->dev, "RapidIO PHY type: %s\n", |
| 1456 | (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : |
| 1457 | ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : |
| 1458 | "unknown")); |
Zhang Wei | 7f620df | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1459 | /* Checking the port training status */ |
| 1460 | if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { |
| 1461 | dev_err(&dev->dev, "Port is not ready. " |
| 1462 | "Attempting to restart the connection...\n"); |
| 1463 | switch (port->phy_type) { |
| 1464 | case RIO_PHY_SERIAL: |
| 1465 | /* Disable ports */ |
| 1466 | out_be32(priv->regs_win + RIO_CCSR, 0); |
| 1467 | /* Set 1x lane */ |
| 1468 | setbits32(priv->regs_win + RIO_CCSR, 0x02000000); |
| 1469 | /* Enable ports */ |
| 1470 | setbits32(priv->regs_win + RIO_CCSR, 0x00600000); |
| 1471 | break; |
| 1472 | case RIO_PHY_PARALLEL: |
| 1473 | /* Disable ports */ |
| 1474 | out_be32(priv->regs_win + RIO_CCSR, 0x22000000); |
| 1475 | /* Enable ports */ |
| 1476 | out_be32(priv->regs_win + RIO_CCSR, 0x44000000); |
| 1477 | break; |
| 1478 | } |
| 1479 | msleep(100); |
| 1480 | if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { |
| 1481 | dev_err(&dev->dev, "Port restart failed.\n"); |
| 1482 | rc = -ENOLINK; |
| 1483 | goto err; |
| 1484 | } |
| 1485 | dev_info(&dev->dev, "Port restart succeeded.\n"); |
| 1486 | } |
| 1487 | fsl_rio_info(&dev->dev, ccsr); |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1488 | |
Zhang Wei | e042323 | 2008-04-18 13:33:42 -0700 | [diff] [blame] | 1489 | port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) |
| 1490 | & RIO_PEF_CTLS) >> 4; |
| 1491 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
| 1492 | port->sys_size ? 65536 : 256); |
| 1493 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1494 | priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win |
| 1495 | + RIO_ATMU_REGS_OFFSET); |
| 1496 | priv->maint_atmu_regs = priv->atmu_regs + 1; |
| 1497 | priv->dbell_atmu_regs = priv->atmu_regs + 2; |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1498 | priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + |
| 1499 | ((port->phy_type == RIO_PHY_SERIAL) ? |
| 1500 | RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); |
| 1501 | |
| 1502 | /* Accept any destination ID on the serial RapidIO controller. */ |
| 1503 | if (port->phy_type == RIO_PHY_SERIAL) |
| 1504 | out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1505 | |
| 1506 | /* Configure maintenance transaction window */ |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1507 | out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); |
Thomas Moll | bd4fb65 | 2010-05-26 14:44:05 -0700 | [diff] [blame] | 1508 | out_be32(&priv->maint_atmu_regs->rowar, |
| 1509 | 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1510 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1511 | priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1512 | |
| 1513 | /* Configure outbound doorbell window */ |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1514 | out_be32(&priv->dbell_atmu_regs->rowbar, |
| 1515 | (law_start + RIO_MAINT_WIN_SIZE) >> 12); |
| 1516 | out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1517 | fsl_rio_doorbell_init(port); |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame] | 1518 | fsl_rio_port_write_init(port); |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1519 | |
Alexandre Bounine | a52c8f5 | 2010-05-26 14:44:00 -0700 | [diff] [blame] | 1520 | saved_mcheck_exception = ppc_md.machine_check_exception; |
| 1521 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; |
| 1522 | /* Ensure that RFXE is set */ |
| 1523 | mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); |
| 1524 | |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1525 | return 0; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1526 | err: |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1527 | iounmap(priv->regs_win); |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1528 | kfree(priv); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1529 | err_priv: |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1530 | kfree(port); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1531 | err_port: |
| 1532 | kfree(ops); |
| 1533 | err_ops: |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1534 | return rc; |
Matt Porter | 2b0c28d7f | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1535 | } |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1536 | |
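/*
 * Illustrative device-tree fragment (a sketch only; addresses, sizes and
 * interrupt numbers are board-specific and shown here as placeholders).  It
 * covers the properties fsl_rio_setup() parses: "reg", "ranges", the
 * address/size cell counts and the interrupt specifiers at indexes 0, 2, 3
 * and 4 (port-write, doorbell, message-tx, message-rx).
 *
 *	rapidio@c0000 {
 *		compatible = "fsl,rapidio-delta";
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		reg = <0xc0000 0x20000>;
 *		ranges = <0 0 0xc0000000 0 0x20000000>;
 *		interrupts = <48 2 49 2 50 2 53 2 54 2>;
 *	};
 */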
| 1537 | /* The probe function for RapidIO peer-to-peer network. |
| 1538 | */ |
Grant Likely | a454dc5 | 2010-07-22 15:52:34 -0600 | [diff] [blame] | 1539 | static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev, |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1540 | const struct of_device_id *match) |
| 1541 | { |
| 1542 | int rc; |
| 1543 | printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1544 | dev->dev.of_node->full_name); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1545 | |
| 1546 | rc = fsl_rio_setup(dev); |
| 1547 | if (rc) |
| 1548 | goto out; |
| 1549 | |
| 1550 | /* Enumerate all registered ports */ |
| 1551 | rc = rio_init_mports(); |
| 1552 | out: |
| 1553 | return rc; |
| 1554 | } |
| 1555 | |
| 1556 | static const struct of_device_id fsl_of_rio_rpn_ids[] = { |
| 1557 | { |
| 1558 | .compatible = "fsl,rapidio-delta", |
| 1559 | }, |
| 1560 | {}, |
| 1561 | }; |
| 1562 | |
| 1563 | static struct of_platform_driver fsl_of_rio_rpn_driver = { |
Grant Likely | 4018294 | 2010-04-13 16:13:02 -0700 | [diff] [blame] | 1564 | .driver = { |
| 1565 | .name = "fsl-of-rio", |
| 1566 | .owner = THIS_MODULE, |
| 1567 | .of_match_table = fsl_of_rio_rpn_ids, |
| 1568 | }, |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1569 | .probe = fsl_of_rio_rpn_probe, |
| 1570 | }; |
| 1571 | |
| 1572 | static __init int fsl_of_rio_rpn_init(void) |
| 1573 | { |
| 1574 | return of_register_platform_driver(&fsl_of_rio_rpn_driver); |
| 1575 | } |
| 1576 | |
| 1577 | subsys_initcall(fsl_of_rio_rpn_init); |