/*
 * Freescale MPC85xx/MPC86xx RapidIO support
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>

#include <asm/io.h>

#undef DEBUG_PW	/* Port-Write debugging */

/* RapidIO IRQ numbers, read from the OF device tree */
#define IRQ_RIO_BELL(m)		(((struct rio_priv *)(m->priv))->bellirq)
#define IRQ_RIO_TX(m)		(((struct rio_priv *)(m->priv))->txirq)
#define IRQ_RIO_RX(m)		(((struct rio_priv *)(m->priv))->rxirq)
#define IRQ_RIO_PW(m)		(((struct rio_priv *)(m->priv))->pwirq)

#define RIO_ATMU_REGS_OFFSET	0x10c00
#define RIO_P_MSG_REGS_OFFSET	0x11000
#define RIO_S_MSG_REGS_OFFSET	0x13000
#define RIO_ESCSR		0x158
#define RIO_CCSR		0x15c
#define RIO_LTLEDCSR		0x0608
#define RIO_LTLEECSR		0x060c
#define RIO_EPWISR		0x10010
#define RIO_ISR_AACR		0x10120
#define RIO_ISR_AACR_AA		0x1	/* Accept All ID */
#define RIO_MAINT_WIN_SIZE	0x400000
#define RIO_DBELL_WIN_SIZE	0x1000

#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

#define RIO_IPWMR_SEN		0x00100000
#define RIO_IPWMR_QFIE		0x00000100
#define RIO_IPWMR_EIE		0x00000020
#define RIO_IPWMR_CQ		0x00000002
#define RIO_IPWMR_PWE		0x00000001

#define RIO_IPWSR_QF		0x00100000
#define RIO_IPWSR_TE		0x00000080
#define RIO_IPWSR_QFI		0x00000010
#define RIO_IPWSR_PWD		0x00000008
#define RIO_IPWSR_PWB		0x00000004

#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096
#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001
#define DOORBELL_TID_OFFSET	0x02
#define DOORBELL_SID_OFFSET	0x04
#define DOORBELL_INFO_OFFSET	0x06

#define DOORBELL_MESSAGE_SIZE	0x08
#define DBELL_SID(x)		(*(u16 *)(x + DOORBELL_SID_OFFSET))
#define DBELL_TID(x)		(*(u16 *)(x + DOORBELL_TID_OFFSET))
#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))

struct rio_atmu_regs {
	u32 rowtar;
	u32 rowtear;
	u32 rowbar;
	u32 pad2;
	u32 rowar;
	u32 pad3[3];
};

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
	u32 pad7[226];
	u32 odmr;
	u32 odsr;
	u32 res0[4];
	u32 oddpr;
	u32 oddatr;
	u32 res1[3];
	u32 odretcr;
	u32 res2[12];
	u32 dmr;
	u32 dsr;
	u32 pad8;
	u32 dqdpar;
	u32 pad9;
	u32 dqepar;
	u32 pad10[26];
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};

struct rio_tx_desc {
	u32 res1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 res2;
	u32 res3;
	u32 dwcnt;
	u32 res4;
};

struct rio_dbell_ring {
	void *virt;
	dma_addr_t phys;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

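/*
 * Book-keeping for the single inbound port-write message buffer: @virt and
 * @phys describe the staging buffer the port-write unit deposits messages
 * into (set up elsewhere in this driver), while msg_count, err_count and
 * discard_count are statistics maintained by the port-write interrupt
 * handler.
 */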
struct rio_port_write_msg {
	void *virt;
	dma_addr_t phys;
	u32 msg_count;
	u32 err_count;
	u32 discard_count;
};

struct rio_priv {
	struct device *dev;
	void __iomem *regs_win;
	struct rio_atmu_regs __iomem *atmu_regs;
	struct rio_atmu_regs __iomem *maint_atmu_regs;
	struct rio_atmu_regs __iomem *dbell_atmu_regs;
	void __iomem *dbell_win;
	void __iomem *maint_win;
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_dbell_ring dbell_ring;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	struct rio_port_write_msg port_write_msg;
	int bellirq;
	int txirq;
	int rxirq;
	int pwirq;
	struct work_struct pw_work;
	struct kfifo pw_fifo;
	spinlock_t pw_fifo_lock;
};

/**
 * fsl_rio_doorbell_send - Send an MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends an MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
static int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);
	switch (mport->phy_type) {
	case RIO_PHY_PARALLEL:
		out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
		out_be16(priv->dbell_win, data);
		break;
	case RIO_PHY_SERIAL:
		/* On serial RapidIO silicon, such as the MPC8548 and
		 * MPC8641, the operations below are required.
		 */
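		/*
		 * Rough sequence, as implemented below: disable the
		 * outbound doorbell unit (ODMR), set the retry error
		 * threshold (ODRETCR), program the destination ID and the
		 * 16-bit doorbell data, then re-enable the unit, which
		 * triggers transmission.
		 */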
		out_be32(&priv->msg_regs->odmr, 0x00000000);
		out_be32(&priv->msg_regs->odretcr, 0x00000004);
		out_be32(&priv->msg_regs->oddpr, destid << 16);
		out_be32(&priv->msg_regs->oddatr, data);
		out_be32(&priv->msg_regs->odmr, 0x00000001);
		break;
	}

	return 0;
}

/**
 * fsl_local_config_read - Generate an MPC85xx local config space read
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be read into
 *
 * Generates an MPC85xx local configuration space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_read(struct rio_mport *mport,
				int index, u32 offset, int len, u32 *data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
		 offset);
	*data = in_be32(priv->regs_win + offset);

	return 0;
}

/**
 * fsl_local_config_write - Generate an MPC85xx local config space write
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates an MPC85xx local configuration space write. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_write(struct rio_mport *mport,
				int index, u32 offset, int len, u32 data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug
	    ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
	     index, offset, data);
	out_be32(priv->regs_win + offset, data);

	return 0;
}

/**
 * fsl_rio_config_read - Generate an MPC85xx read maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Location to be read into
 *
 * Generates an MPC85xx read maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 *val)
{
	struct rio_priv *priv = mport->priv;
	u8 *data;

	pr_debug
	    ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
	     index, destid, hopcount, offset, len);
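	/*
	 * Retarget the maintenance window: the destination ID, hopcount and
	 * word-aligned config-space offset are packed into ROWTAR, while the
	 * low offset bits are supplied by the access through the maintenance
	 * window itself.
	 */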
	out_be32(&priv->maint_atmu_regs->rowtar,
		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));

	data = (u8 *) priv->maint_win + offset;
	switch (len) {
	case 1:
		*val = in_8((u8 *) data);
		break;
	case 2:
		*val = in_be16((u16 *) data);
		break;
	default:
		*val = in_be32((u32 *) data);
		break;
	}

	return 0;
}

/**
 * fsl_rio_config_write - Generate an MPC85xx write maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Value to be written
 *
 * Generates an MPC85xx write maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 val)
{
	struct rio_priv *priv = mport->priv;
	u8 *data;
	pr_debug
	    ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
	     index, destid, hopcount, offset, len, val);
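	/* Retarget the maintenance window as in fsl_rio_config_read() */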
	out_be32(&priv->maint_atmu_regs->rowtar,
		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));

	data = (u8 *) priv->maint_win + offset;
	switch (len) {
	case 1:
		out_8((u8 *) data, val);
		break;
	case 2:
		out_be16((u16 *) data, val);
		break;
	default:
		out_be32((u32 *) data, val);
		break;
	}

	return 0;
}

/**
 * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct rio_priv *priv = mport->priv;
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
					+ priv->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug
	    ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
	     rdev->destid, mbox, (int)buffer, len);

	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	switch (mport->phy_type) {
	case RIO_PHY_PARALLEL:
		/* Set mbox field for message */
		desc->dport = mbox & 0x3;

		/* Enable EOMI interrupt, set priority, and set destid */
		desc->dattr = 0x28000000 | (rdev->destid << 2);
		break;
	case RIO_PHY_SERIAL:
		/* Set mbox field for message, and set destid */
		desc->dport = (rdev->destid << 16) | (mbox & 0x3);

		/* Enable EOMI interrupt and priority */
		desc->dattr = 0x28000000;
		break;
	}

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&priv->msg_regs->omr);
	out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
		priv->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}

EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
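
/*
 * Illustrative use only (not part of this driver): a mailbox client would
 * typically open the outbound mailbox once and then queue messages, e.g.
 *
 *	if (!rio_open_outb_mbox(mport, my_dev_id, 0, 64))
 *		rio_hw_add_outb_message(mport, rdev, 0, my_buf, my_len);
 *
 * where my_dev_id, my_buf and my_len are the caller's own context, buffer
 * and message length (8 bytes up to RIO_MAX_MSG_SIZE).
 */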

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	osr = in_be32(&priv->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
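		/*
		 * Descriptors are RIO_MSG_DESC_SIZE (32) bytes each, so the
		 * just-completed slot is the dequeue pointer's byte offset
		 * from the ring base shifted right by 5.
		 */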
		u32 dqp = in_be32(&priv->msg_regs->odqdpar);
		int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
		port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
				slot);

		/* Ack the end-of-message interrupt */
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	priv->msg_tx_ring.dev_id = dev_id;
	priv->msg_tx_ring.size = entries;

	for (i = 0; i < priv->msg_tx_ring.size; i++) {
		priv->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!priv->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < priv->msg_tx_ring.size; j++)
				if (priv->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						priv->msg_tx_ring.
						virt_buffer[j],
						priv->msg_tx_ring.
						phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
			priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			&priv->msg_tx_ring.phys, GFP_KERNEL);
	if (!priv->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(priv->msg_tx_ring.virt, 0,
			priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	priv->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
	out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&priv->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&priv->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&priv->msg_regs->omr,
		 in_be32(&priv->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < priv->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				  priv->msg_tx_ring.virt_buffer[i],
				  priv->msg_tx_ring.phys_buffer[i]);

	return rc;
}

/**
 * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	/* Disable outbound message unit */
	out_be32(&priv->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	isr = in_be32(&priv->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * We implement *only* mailbox 0, but can receive messages
		 * for any mailbox/letter to that mailbox destination. So,
		 * make the callback with an unknown/invalid mailbox number
		 * argument.
		 */
		port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);

		/* Ack the queueing interrupt */
		out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	priv->msg_rx_ring.dev_id = dev_id;
	priv->msg_rx_ring.size = entries;
	priv->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < priv->msg_rx_ring.size; i++)
		priv->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
			priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			&priv->msg_rx_ring.phys, GFP_KERNEL);
	if (!priv->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
	out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		/* Free the inbound message ring allocated above */
		dma_free_coherent(priv->dev,
				  priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				  priv->msg_rx_ring.virt,
				  priv->msg_rx_ring.phys);
		goto out;
	}
| 723 | |
| 724 | /* |
| 725 | * Configure inbound message unit: |
| 726 | * Snooping |
| 727 | * 4KB max message size |
| 728 | * Unmask all interrupt sources |
| 729 | * Disable |
| 730 | */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 731 | out_be32(&priv->msg_regs->imr, 0x001b0060); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 732 | |
| 733 | /* Set number of queue entries */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 734 | setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 735 | |
| 736 | /* Now enable the unit */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 737 | setbits32(&priv->msg_regs->imr, 0x1); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 738 | |
| 739 | out: |
| 740 | return rc; |
| 741 | } |
| 742 | |
/**
 * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	/* Disable inbound message unit */
	out_be32(&priv->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

| 765 | /** |
| 766 | * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue |
| 767 | * @mport: Master port implementing the inbound message unit |
| 768 | * @mbox: Inbound mailbox number |
| 769 | * @buf: Buffer to add to inbound queue |
| 770 | * |
| 771 | * Adds the @buf buffer to the MPC85xx inbound message queue. Returns |
| 772 | * %0 on success or %-EINVAL on failure. |
| 773 | */ |
| 774 | int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) |
| 775 | { |
| 776 | int rc = 0; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 777 | struct rio_priv *priv = mport->priv; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 778 | |
| 779 | pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 780 | priv->msg_rx_ring.rx_slot); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 781 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 782 | if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 783 | printk(KERN_ERR |
| 784 | "RIO: error adding inbound buffer %d, buffer exists\n", |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 785 | priv->msg_rx_ring.rx_slot); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 786 | rc = -EINVAL; |
| 787 | goto out; |
| 788 | } |
| 789 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 790 | priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf; |
| 791 | if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size) |
| 792 | priv->msg_rx_ring.rx_slot = 0; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 793 | |
| 794 | out: |
| 795 | return rc; |
| 796 | } |
| 797 | |
| 798 | EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer); |
| 799 | |
| 800 | /** |
| 801 | * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit |
| 802 | * @mport: Master port implementing the inbound message unit |
| 803 | * @mbox: Inbound mailbox number |
| 804 | * |
| 805 | * Gets the next available inbound message from the inbound message queue. |
| 806 | * A pointer to the message is returned on success or NULL on failure. |
| 807 | */ |
| 808 | void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox) |
| 809 | { |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 810 | struct rio_priv *priv = mport->priv; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 811 | u32 phys_buf, virt_buf; |
| 812 | void *buf = NULL; |
| 813 | int buf_idx; |
| 814 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 815 | phys_buf = in_be32(&priv->msg_regs->ifqdpar); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 816 | |
| 817 | /* If no more messages, then bail out */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 818 | if (phys_buf == in_be32(&priv->msg_regs->ifqepar)) |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 819 | goto out2; |
| 820 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 821 | virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf |
| 822 | - priv->msg_rx_ring.phys); |
| 823 | buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; |
| 824 | buf = priv->msg_rx_ring.virt_buffer[buf_idx]; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 825 | |
| 826 | if (!buf) { |
| 827 | printk(KERN_ERR |
| 828 | "RIO: inbound message copy failed, no buffers\n"); |
| 829 | goto out1; |
| 830 | } |
| 831 | |
| 832 | /* Copy max message size, caller is expected to allocate that big */ |
| 833 | memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); |
| 834 | |
| 835 | /* Clear the available buffer */ |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 836 | priv->msg_rx_ring.virt_buffer[buf_idx] = NULL; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 837 | |
| 838 | out1: |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 839 | setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 840 | |
| 841 | out2: |
| 842 | return buf; |
| 843 | } |
| 844 | |
| 845 | EXPORT_SYMBOL_GPL(rio_hw_get_inb_message); |
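
/*
 * Illustrative use only (not part of this driver): an inbound mailbox
 * client typically opens the mailbox, pre-posts receive buffers, and then
 * fetches messages from its registered callback, e.g.
 *
 *	rio_open_inb_mbox(mport, my_dev_id, 0, 32);
 *	for (i = 0; i < 32; i++)
 *		rio_hw_add_inb_buffer(mport, 0, my_bufs[i]);
 *	...
 *	msg = rio_hw_get_inb_message(mport, 0);	(from the mailbox callback)
 *
 * where my_dev_id and my_bufs are the caller's own context and
 * RIO_MAX_MSG_SIZE-sized buffers.
 */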

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	dsr = in_be32(&priv->msg_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
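		/*
		 * The doorbell ring holds 512 entries of
		 * DOORBELL_MESSAGE_SIZE bytes (4KB total), so the low 12
		 * bits of the dequeue pointer locate the current entry
		 * within the ring.
		 */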
		u32 dmsg =
		    (u32) priv->dbell_ring.virt +
		    (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));

		list_for_each_entry(dbell, &port->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
			    (dbell->res->end >= DBELL_INF(dmsg))) {
				found = 1;
				break;
			}
		}
		if (found) {
			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
				    DBELL_INF(dmsg));
		} else {
			pr_debug
			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
			     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
		}
		setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @mport: Master port implementing the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
static int fsl_rio_doorbell_init(struct rio_mport *mport)
{
	struct rio_priv *priv = mport->priv;
	int rc = 0;

	/* Map outbound doorbell window immediately after maintenance window */
	priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
			    RIO_DBELL_WIN_SIZE);
	if (!priv->dbell_win) {
		printk(KERN_ERR
		       "RIO: unable to map outbound doorbell window\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Initialize inbound doorbells */
	priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 *
		    DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
	if (!priv->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		iounmap(priv->dbell_win);
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
	out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)mport);
	if (rc < 0) {
		iounmap(priv->dbell_win);
		dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE,
				  priv->dbell_ring.virt, priv->dbell_ring.phys);
		printk(KERN_ERR
		       "MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&priv->msg_regs->dmr, 0x00108161);

out:
	return rc;
}

/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;
	u32 epwisr, tmp;

	ipwmr = in_be32(&priv->msg_regs->pwmr);
	ipwsr = in_be32(&priv->msg_regs->pwsr);

	epwisr = in_be32(priv->regs_win + RIO_EPWISR);
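	/*
	 * EPWISR bit 31 appears to flag local logical/transport layer
	 * errors: dump and clear RIO_LTLEDCSR so the condition does not
	 * stick.  Bit 0 is the port-write interrupt handled below.
	 */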
| 990 | if (epwisr & 0x80000000) { |
| 991 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); |
| 992 | pr_info("RIO_LTLEDCSR = 0x%x\n", tmp); |
| 993 | out_be32(priv->regs_win + RIO_LTLEDCSR, 0); |
| 994 | } |
| 995 | |
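| | /* EPWISR bit 0 appears to indicate a pending port-write; nothing more to do otherwise */ |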
| 996 | if (!(epwisr & 0x00000001)) |
| 997 | return IRQ_HANDLED; |
| 998 | |
| 999 | #ifdef DEBUG_PW |
| 1000 | pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); |
| 1001 | if (ipwsr & RIO_IPWSR_QF) |
| 1002 | pr_debug(" QF"); |
| 1003 | if (ipwsr & RIO_IPWSR_TE) |
| 1004 | pr_debug(" TE"); |
| 1005 | if (ipwsr & RIO_IPWSR_QFI) |
| 1006 | pr_debug(" QFI"); |
| 1007 | if (ipwsr & RIO_IPWSR_PWD) |
| 1008 | pr_debug(" PWD"); |
| 1009 | if (ipwsr & RIO_IPWSR_PWB) |
| 1010 | pr_debug(" PWB"); |
| 1011 | pr_debug(" )\n"); |
| 1012 | #endif |
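| | /* Acknowledge the handled conditions (IPWSR status bits are presumably write-1-to-clear) */ |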
| 1013 | out_be32(&priv->msg_regs->pwsr, |
| 1014 | ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); |
| 1015 | |
| 1016 | if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { |
| 1017 | priv->port_write_msg.err_count++; |
| 1018 | pr_info("RIO: Port-Write Transaction Err (%d)\n", |
| 1019 | priv->port_write_msg.err_count); |
| 1020 | } |
| 1021 | if (ipwsr & RIO_IPWSR_PWD) { |
| 1022 | priv->port_write_msg.discard_count++; |
| 1023 | pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n", |
| 1024 | priv->port_write_msg.discard_count); |
| 1025 | } |
| 1026 | |
| 1027 | /* Schedule deferred processing if PW was received */ |
| 1028 | if (ipwsr & RIO_IPWSR_QFI) { |
| 1029 | /* Save PW message (if there is room in FIFO), |
| 1030 | * otherwise discard it. |
| 1031 | */ |
| 1032 | if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { |
| 1033 | priv->port_write_msg.msg_count++; |
| 1034 | kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, |
| 1035 | RIO_PW_MSG_SIZE); |
| 1036 | } else { |
| 1037 | priv->port_write_msg.discard_count++; |
| 1038 | pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", |
| 1039 | priv->port_write_msg.discard_count); |
| 1040 | } |
| 1041 | schedule_work(&priv->pw_work); |
| 1042 | } |
| 1043 | |
| 1044 | /* Issue Clear Queue command. This allows another |
| 1045 | * port-write to be received. |
| 1046 | */ |
| 1047 | out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); |
| 1048 | |
| 1049 | return IRQ_HANDLED; |
| 1050 | } |
| 1051 | |
| 1052 | static void fsl_pw_dpc(struct work_struct *work) |
| 1053 | { |
| 1054 | struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); |
| 1055 | unsigned long flags; |
| 1056 | u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; |
| 1057 | |
| 1058 | /* |
| 1059 | * Process port-write messages |
| 1060 | */ |
| 1061 | spin_lock_irqsave(&priv->pw_fifo_lock, flags); |
| 1062 | while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, |
| 1063 | RIO_PW_MSG_SIZE)) { |
| 1064 | /* Process one message */ |
| 1065 | spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); |
| 1066 | #ifdef DEBUG_PW |
| 1067 | { |
| 1068 | u32 i; |
| 1069 | pr_debug("%s : Port-Write Message:", __func__); |
| 1070 | for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { |
| 1071 | if ((i%4) == 0) |
| 1072 | pr_debug("\n0x%02x: 0x%08x", i*4, |
| 1073 | msg_buffer[i]); |
| 1074 | else |
| 1075 | pr_debug(" 0x%08x", msg_buffer[i]); |
| 1076 | } |
| 1077 | pr_debug("\n"); |
| 1078 | } |
| 1079 | #endif |
| 1080 | /* Pass the port-write message to RIO core for processing */ |
| 1081 | rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); |
| 1082 | spin_lock_irqsave(&priv->pw_fifo_lock, flags); |
| 1083 | } |
| 1084 | spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); |
| 1085 | } |
| 1086 | |
| 1087 | /** |
| 1088 | * fsl_rio_pw_enable - enable/disable port-write interface |
| 1089 | * @mport: Master port implementing the port write unit |
| 1090 | * @enable: 1=enable; 0=disable port-write message handling |
| 1091 | */ |
| 1092 | static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) |
| 1093 | { |
| 1094 | struct rio_priv *priv = mport->priv; |
| 1095 | u32 rval; |
| 1096 | |
| 1097 | rval = in_be32(&priv->msg_regs->pwmr); |
| 1098 | |
| 1099 | if (enable) |
| 1100 | rval |= RIO_IPWMR_PWE; |
| 1101 | else |
| 1102 | rval &= ~RIO_IPWMR_PWE; |
| 1103 | |
| 1104 | out_be32(&priv->msg_regs->pwmr, rval); |
| 1105 | |
| 1106 | return 0; |
| 1107 | } |
| 1108 | |
| 1109 | /** |
| 1110 | * fsl_rio_port_write_init - MPC85xx port write interface init |
| 1111 | * @mport: Master port implementing the port write unit |
| 1112 | * |
| 1113 | * Initializes port write unit hardware and DMA buffer |
| 1114 | * ring. Called from fsl_rio_setup(). Returns %0 on success |
| 1115 | * or %-ENOMEM on failure. |
| 1116 | */ |
| 1117 | static int fsl_rio_port_write_init(struct rio_mport *mport) |
| 1118 | { |
| 1119 | struct rio_priv *priv = mport->priv; |
| 1120 | int rc = 0; |
| 1121 | |
| 1122 | /* The following configuration requires the port-write controller to be disabled */ |
| 1123 | out_be32(&priv->msg_regs->pwmr, |
| 1124 | in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); |
| 1125 | |
| 1126 | /* Initialize port write */ |
| 1127 | priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, |
| 1128 | RIO_PW_MSG_SIZE, |
| 1129 | &priv->port_write_msg.phys, GFP_KERNEL); |
| 1130 | if (!priv->port_write_msg.virt) { |
| 1131 | pr_err("RIO: unable to allocate port-write queue\n"); |
| 1132 | return -ENOMEM; |
| 1133 | } |
| 1134 | |
| 1135 | priv->port_write_msg.err_count = 0; |
| 1136 | priv->port_write_msg.discard_count = 0; |
| 1137 | |
| 1138 | /* Point dequeue/enqueue pointers at first entry */ |
| 1139 | out_be32(&priv->msg_regs->epwqbar, 0); |
| 1140 | out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); |
| 1141 | |
| 1142 | pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", |
| 1143 | in_be32(&priv->msg_regs->epwqbar), |
| 1144 | in_be32(&priv->msg_regs->pwqbar)); |
| 1145 | |
| 1146 | /* Clear interrupt status IPWSR */ |
| 1147 | out_be32(&priv->msg_regs->pwsr, |
| 1148 | (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); |
| 1149 | |
| 1150 | /* Configure the port-write controller: enable snooping, enable all |
| 1151 | reporting, and clear the queue-full condition */ |
| 1152 | out_be32(&priv->msg_regs->pwmr, |
| 1153 | RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); |
| 1154 | |
| 1155 | |
| 1156 | /* Hook up port-write handler */ |
| 1157 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, |
| 1158 | "port-write", (void *)mport); |
| 1159 | if (rc < 0) { |
| 1160 | pr_err("MPC85xx RIO: unable to request port-write irq\n"); |
| 1161 | goto err_out; |
| 1162 | } |
| 1163 | |
| 1164 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); |
| 1165 | spin_lock_init(&priv->pw_fifo_lock); |
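| | /* FIFO sized for up to 32 port-write messages buffered between the ISR and the work queue */ |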
| 1166 | if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { |
| 1167 | pr_err("FIFO allocation failed\n"); |
| 1168 | rc = -ENOMEM; |
| 1169 | goto err_out_irq; |
| 1170 | } |
| 1171 | |
| 1172 | pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", |
| 1173 | in_be32(&priv->msg_regs->pwmr), |
| 1174 | in_be32(&priv->msg_regs->pwsr)); |
| 1175 | |
| 1176 | return rc; |
| 1177 | |
| 1178 | err_out_irq: |
| 1179 | free_irq(IRQ_RIO_PW(mport), (void *)mport); |
| 1180 | err_out: |
| 1181 | dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, |
| 1182 | priv->port_write_msg.virt, |
| 1183 | priv->port_write_msg.phys); |
| 1184 | return rc; |
| 1185 | } |
| 1186 | |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1187 | static char *cmdline = NULL; |
| 1188 | |
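| | /* Return the host device ID given via "riohdid="; -1 (none given) presumably leaves the port in agent/discovery mode */ |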
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1189 | static int fsl_rio_get_hdid(int index) |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1190 | { |
| 1191 | /* XXX Need to parse multiple entries in some format */ |
| 1192 | if (!cmdline) |
| 1193 | return -1; |
| 1194 | |
| 1195 | return simple_strtol(cmdline, NULL, 0); |
| 1196 | } |
| 1197 | |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1198 | static int fsl_rio_get_cmdline(char *s) |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1199 | { |
| 1200 | if (!s) |
| 1201 | return 0; |
| 1202 | |
| 1203 | cmdline = s; |
| 1204 | return 1; |
| 1205 | } |
| 1206 | |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1207 | __setup("riohdid=", fsl_rio_get_cmdline); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1208 | |
Zhang Wei | 7f620df | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1209 | static inline void fsl_rio_info(struct device *dev, u32 ccsr) |
| 1210 | { |
| 1211 | const char *str; |
| 1212 | if (ccsr & 1) { |
| 1213 | /* Serial phy */ |
| 1214 | switch (ccsr >> 30) { |
| 1215 | case 0: |
| 1216 | str = "1"; |
| 1217 | break; |
| 1218 | case 1: |
| 1219 | str = "4"; |
| 1220 | break; |
| 1221 | default: |
| 1222 | str = "Unknown"; |
Joe Perches | d258e64 | 2009-06-28 06:26:10 +0000 | [diff] [blame] | 1223 | break; |
Zhang Wei | 7f620df | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1224 | } |
| 1225 | dev_info(dev, "Hardware port width: %s\n", str); |
| 1226 | |
| 1227 | switch ((ccsr >> 27) & 7) { |
| 1228 | case 0: |
| 1229 | str = "Single-lane 0"; |
| 1230 | break; |
| 1231 | case 1: |
| 1232 | str = "Single-lane 2"; |
| 1233 | break; |
| 1234 | case 2: |
| 1235 | str = "Four-lane"; |
| 1236 | break; |
| 1237 | default: |
| 1238 | str = "Unknown"; |
| 1239 | break; |
| 1240 | } |
| 1241 | dev_info(dev, "Training connection status: %s\n", str); |
| 1242 | } else { |
| 1243 | /* Parallel phy */ |
| 1244 | if (!(ccsr & 0x80000000)) |
| 1245 | dev_info(dev, "Output port operating in 8-bit mode\n"); |
| 1246 | if (!(ccsr & 0x08000000)) |
| 1247 | dev_info(dev, "Input port operating in 8-bit mode\n"); |
| 1248 | } |
| 1249 | } |
| 1250 | |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1251 | /** |
Randy Dunlap | 9941d94 | 2008-04-30 16:45:58 -0700 | [diff] [blame] | 1252 | * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface |
| 1253 | * @dev: of_device pointer |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1254 | * |
| 1255 | * Initializes MPC85xx RapidIO hardware interface, configures |
| 1256 | * master port with system-specific info, and registers the |
| 1257 | * master port with the RapidIO subsystem. |
| 1258 | */ |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1259 | int fsl_rio_setup(struct of_device *dev) |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1260 | { |
| 1261 | struct rio_ops *ops; |
| 1262 | struct rio_mport *port; |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1263 | struct rio_priv *priv; |
| 1264 | int rc = 0; |
| 1265 | const u32 *dt_range, *cell; |
| 1266 | struct resource regs; |
| 1267 | int rlen; |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1268 | u32 ccsr; |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1269 | u64 law_start, law_size; |
| 1270 | int paw, aw, sw; |
| 1271 | |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1272 | if (!dev->dev.of_node) { |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1273 | dev_err(&dev->dev, "Device OF-Node is NULL"); |
| 1274 | return -EFAULT; |
| 1275 | } |
| 1276 | |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1277 | rc = of_address_to_resource(dev->dev.of_node, 0, ®s); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1278 | if (rc) { |
| 1279 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1280 | dev->dev.of_node->full_name); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1281 | return -EFAULT; |
| 1282 | } |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1283 | dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); |
Kumar Gala | fc274a1 | 2009-05-13 17:02:24 -0500 | [diff] [blame] | 1284 | dev_info(&dev->dev, "Regs: %pR\n", ®s); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1285 | |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1286 | dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1287 | if (!dt_range) { |
| 1288 | dev_err(&dev->dev, "Can't get %s property 'ranges'\n", |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1289 | dev->dev.of_node->full_name); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1290 | return -EFAULT; |
| 1291 | } |
| 1292 | |
| 1293 | /* Get node address width */ |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1294 | cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1295 | if (cell) |
| 1296 | aw = *cell; |
| 1297 | else |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1298 | aw = of_n_addr_cells(dev->dev.of_node); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1299 | /* Get node size width */ |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1300 | cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1301 | if (cell) |
| 1302 | sw = *cell; |
| 1303 | else |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1304 | sw = of_n_size_cells(dev->dev.of_node); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1305 | /* Get parent address width */ |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1306 | paw = of_n_addr_cells(dev->dev.of_node); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1307 | |
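| | /* "ranges" is assumed to be <child-addr parent-addr size>; skip the child address cells */ |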
| 1308 | law_start = of_read_number(dt_range + aw, paw); |
| 1309 | law_size = of_read_number(dt_range + aw + paw, sw); |
| 1310 | |
| 1311 | dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", |
| 1312 | law_start, law_size); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1313 | |
Alexandre Bounine | e5cabeb | 2010-05-26 14:43:59 -0700 | [diff] [blame] | 1314 | ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1315 | if (!ops) { |
| 1316 | rc = -ENOMEM; |
| 1317 | goto err_ops; |
| 1318 | } |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1319 | ops->lcread = fsl_local_config_read; |
| 1320 | ops->lcwrite = fsl_local_config_write; |
| 1321 | ops->cread = fsl_rio_config_read; |
| 1322 | ops->cwrite = fsl_rio_config_write; |
| 1323 | ops->dsend = fsl_rio_doorbell_send; |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame^] | 1324 | ops->pwenable = fsl_rio_pw_enable; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1325 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1326 | port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1327 | if (!port) { |
| 1328 | rc = -ENOMEM; |
| 1329 | goto err_port; |
| 1330 | } |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1331 | port->id = 0; |
| 1332 | port->index = 0; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1333 | |
| 1334 | priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); |
| 1335 | if (!priv) { |
| 1336 | printk(KERN_ERR "Can't alloc memory for 'priv'\n"); |
| 1337 | rc = -ENOMEM; |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1338 | goto err_priv; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1339 | } |
| 1340 | |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1341 | INIT_LIST_HEAD(&port->dbells); |
| 1342 | port->iores.start = law_start; |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1343 | port->iores.end = law_start + law_size - 1; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1344 | port->iores.flags = IORESOURCE_MEM; |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1345 | port->iores.name = "rio_io_win"; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1346 | |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame^] | 1347 | priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1348 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); |
| 1349 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); |
| 1350 | priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame^] | 1351 | dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq: %d\n", |
| 1352 | priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1353 | |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1354 | rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); |
| 1355 | rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); |
| 1356 | rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); |
| 1357 | strcpy(port->name, "RIO0 mport"); |
| 1358 | |
Anton Vorontsov | 0dbbbf1 | 2009-04-18 21:48:52 +0400 | [diff] [blame] | 1359 | priv->dev = &dev->dev; |
| 1360 | |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1361 | port->ops = ops; |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1362 | port->host_deviceid = fsl_rio_get_hdid(port->id); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1363 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1364 | port->priv = priv; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1365 | rio_register_mport(port); |
| 1366 | |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1367 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); |
Zhang Wei | e042323 | 2008-04-18 13:33:42 -0700 | [diff] [blame] | 1368 | |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1369 | /* Probe the master port phy type */ |
| 1370 | ccsr = in_be32(priv->regs_win + RIO_CCSR); |
| 1371 | port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; |
| 1372 | dev_info(&dev->dev, "RapidIO PHY type: %s\n", |
| 1373 | (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : |
| 1374 | ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : |
| 1375 | "unknown")); |
Zhang Wei | 7f620df | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1376 | /* Checking the port training status */ |
| 1377 | if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { |
| 1378 | dev_err(&dev->dev, "Port is not ready. " |
| 1379 | "Trying to restart the connection...\n"); |
| 1380 | switch (port->phy_type) { |
| 1381 | case RIO_PHY_SERIAL: |
| 1382 | /* Disable ports */ |
| 1383 | out_be32(priv->regs_win + RIO_CCSR, 0); |
| 1384 | /* Set 1x lane */ |
| 1385 | setbits32(priv->regs_win + RIO_CCSR, 0x02000000); |
| 1386 | /* Enable ports */ |
| 1387 | setbits32(priv->regs_win + RIO_CCSR, 0x00600000); |
| 1388 | break; |
| 1389 | case RIO_PHY_PARALLEL: |
| 1390 | /* Disable ports */ |
| 1391 | out_be32(priv->regs_win + RIO_CCSR, 0x22000000); |
| 1392 | /* Enable ports */ |
| 1393 | out_be32(priv->regs_win + RIO_CCSR, 0x44000000); |
| 1394 | break; |
| 1395 | } |
| 1396 | msleep(100); |
| 1397 | if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { |
| 1398 | dev_err(&dev->dev, "Port restart failed.\n"); |
| 1399 | rc = -ENOLINK; |
| 1400 | goto err; |
| 1401 | } |
| 1402 | dev_info(&dev->dev, "Port restart succeeded.\n"); |
| 1403 | } |
| 1404 | fsl_rio_info(&dev->dev, ccsr); |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1405 | |
Zhang Wei | e042323 | 2008-04-18 13:33:42 -0700 | [diff] [blame] | 1406 | port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) |
| 1407 | & RIO_PEF_CTLS) >> 4; |
| 1408 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
| 1409 | port->sys_size ? 65536 : 256); |
| 1410 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1411 | priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win |
| 1412 | + RIO_ATMU_REGS_OFFSET); |
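| | /* Outbound ATMU windows 1 and 2 are used here for maintenance and doorbell transactions */ |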
| 1413 | priv->maint_atmu_regs = priv->atmu_regs + 1; |
| 1414 | priv->dbell_atmu_regs = priv->atmu_regs + 2; |
Zhang Wei | 61b2691 | 2008-04-18 13:33:44 -0700 | [diff] [blame] | 1415 | priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + |
| 1416 | ((port->phy_type == RIO_PHY_SERIAL) ? |
| 1417 | RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); |
| 1418 | |
| 1419 | /* Accept any destination ID on the serial RapidIO controller. */ |
| 1420 | if (port->phy_type == RIO_PHY_SERIAL) |
| 1421 | out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1422 | |
| 1423 | /* Configure maintenance transaction window */ |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1424 | out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); |
| 1425 | out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */ |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1426 | |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1427 | priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1428 | |
| 1429 | /* Configure outbound doorbell window */ |
Li Yang | 186e74b | 2009-05-12 16:35:59 +0800 | [diff] [blame] | 1430 | out_be32(&priv->dbell_atmu_regs->rowbar, |
| 1431 | (law_start + RIO_MAINT_WIN_SIZE) >> 12); |
| 1432 | out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ |
Zhang Wei | d02443a | 2008-04-18 13:33:38 -0700 | [diff] [blame] | 1433 | fsl_rio_doorbell_init(port); |
Alexandre Bounine | 5b2074a | 2010-05-26 14:44:00 -0700 | [diff] [blame^] | 1434 | fsl_rio_port_write_init(port); |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1435 | |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1436 | return 0; |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1437 | err: |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1438 | iounmap(priv->regs_win); |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1439 | kfree(priv); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1440 | err_priv: |
Zhang Wei | ad1e938 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1441 | kfree(port); |
Julia Lawall | 6c75933 | 2009-08-07 09:00:34 +0200 | [diff] [blame] | 1442 | err_port: |
| 1443 | kfree(ops); |
| 1444 | err_ops: |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1445 | return rc; |
Matt Porter | 2b0c28d | 2005-11-07 01:00:19 -0800 | [diff] [blame] | 1446 | } |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1447 | |
| 1448 | /* The probe function for RapidIO peer-to-peer network. |
| 1449 | */ |
| 1450 | static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev, |
| 1451 | const struct of_device_id *match) |
| 1452 | { |
| 1453 | int rc; |
| 1454 | printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", |
Grant Likely | 61c7a08 | 2010-04-13 16:12:29 -0700 | [diff] [blame] | 1455 | dev->dev.of_node->full_name); |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1456 | |
| 1457 | rc = fsl_rio_setup(dev); |
| 1458 | if (rc) |
| 1459 | goto out; |
| 1460 | |
| 1461 | /* Enumerate all registered ports */ |
| 1462 | rc = rio_init_mports(); |
| 1463 | out: |
| 1464 | return rc; |
| 1465 | } |
| 1466 | |
| 1467 | static const struct of_device_id fsl_of_rio_rpn_ids[] = { |
| 1468 | { |
| 1469 | .compatible = "fsl,rapidio-delta", |
| 1470 | }, |
| 1471 | {}, |
| 1472 | }; |
| 1473 | |
| 1474 | static struct of_platform_driver fsl_of_rio_rpn_driver = { |
Grant Likely | 4018294 | 2010-04-13 16:13:02 -0700 | [diff] [blame] | 1475 | .driver = { |
| 1476 | .name = "fsl-of-rio", |
| 1477 | .owner = THIS_MODULE, |
| 1478 | .of_match_table = fsl_of_rio_rpn_ids, |
| 1479 | }, |
Zhang Wei | cc2bb69 | 2008-04-18 13:33:41 -0700 | [diff] [blame] | 1480 | .probe = fsl_of_rio_rpn_probe, |
| 1481 | }; |
| 1482 | |
| 1483 | static __init int fsl_of_rio_rpn_init(void) |
| 1484 | { |
| 1485 | return of_register_platform_driver(&fsl_of_rio_rpn_driver); |
| 1486 | } |
| 1487 | |
| 1488 | subsys_initcall(fsl_of_rio_rpn_init); |